diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h index cdd51bf764..801cc92cf6 100644 --- a/lib/pacemaker/libpacemaker_private.h +++ b/lib/pacemaker/libpacemaker_private.h @@ -1,830 +1,830 @@ /* * Copyright 2021-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__LIBPACEMAKER_PRIVATE__H # define PCMK__LIBPACEMAKER_PRIVATE__H /* This header is for the sole use of libpacemaker, so that functions can be * declared with G_GNUC_INTERNAL for efficiency. */ #include // pe_action_t, pe_node_t, pe_working_set_t // Flags to modify the behavior of pcmk__add_colocated_node_scores() enum pcmk__coloc_select { // With no other flags, apply all "with this" colocations pcmk__coloc_select_default = 0, // Apply "this with" colocations instead of "with this" colocations pcmk__coloc_select_this_with = (1 << 0), // Apply only colocations with non-negative scores pcmk__coloc_select_nonnegative = (1 << 1), // Apply only colocations with at least one matching node pcmk__coloc_select_active = (1 << 2), }; // Flags the update_ordered_actions() method can return enum pcmk__updated { pcmk__updated_none = 0, // Nothing changed pcmk__updated_first = (1 << 0), // First action was updated pcmk__updated_then = (1 << 1), // Then action was updated }; #define pcmk__set_updated_flags(au_flags, action, flags_to_set) do { \ au_flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Action update", \ (action)->uuid, au_flags, \ (flags_to_set), #flags_to_set); \ } while (0) #define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do { \ au_flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Action update", \ (action)->uuid, au_flags, \ (flags_to_clear), #flags_to_clear); \ } while (0) // Resource allocation methods struct resource_alloc_functions_s { /*! * \internal * \brief Assign a resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * * \return Node that \p rsc is assigned to, if assigned entirely to one node */ pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer); /*! * \internal * \brief Create all actions needed for a given resource * * \param[in,out] rsc Resource to create actions for */ void (*create_actions)(pe_resource_t *rsc); /*! * \internal * \brief Schedule any probes needed for a resource on a node * * \param[in,out] rsc Resource to create probe for * \param[in,out] node Node to create probe on * * \return true if any probe was created, otherwise false */ bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node); /*! * \internal * \brief Create implicit constraints needed for a resource * * \param[in,out] rsc Resource to create implicit constraints for */ void (*internal_constraints)(pe_resource_t *rsc); /*! * \internal * \brief Apply a colocation's score to node weights or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node weights (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). 
* * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void (*apply_coloc_score) (pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); /*! * \internal * \brief Create list of all resources in colocations with a given resource * * Given a resource, create a list of all resources involved in mandatory * colocations with it, whether directly or indirectly via chained colocations. * * \param[in] rsc Resource to add to colocated list * \param[in] orig_rsc Resource originally requested * \param[in,out] colocated_rscs Existing list * * \return List of given resource and all resources involved in colocations * * \note This function is recursive; top-level callers should pass NULL as * \p colocated_rscs and \p orig_rsc, and the desired resource as * \p rsc. The recursive calls will use other values. */ GList *(*colocated_resources)(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs); /*! * \internal * \brief Apply a location constraint to a resource's allowed node scores * * \param[in,out] rsc Resource to apply constraint to * \param[in,out] location Location constraint to apply */ void (*apply_location)(pe_resource_t *rsc, pe__location_t *location); /*! * \internal * \brief Return action flags for a given resource action * * \param[in,out] action Action to get flags for * \param[in] node If not NULL, limit effects to this node * * \return Flags appropriate to \p action on \p node * \note For primitives, this will be the same as action->flags regardless * of node. For collective resources, the flags can differ due to * multiple instances possibly being involved. */ enum pe_action_flags (*action_flags)(pe_action_t *action, const pe_node_t *node); /*! * \internal * \brief Update two actions according to an ordering between them * * Given information about an ordering of two actions, update the actions' * flags (and runnable_before members if appropriate) to reflect the * ordering. In some cases, the ordering itself may be disabled as well. * * \param[in,out] first 'First' action in an ordering * \param[in,out] then 'Then' action in an ordering * \param[in] node If not NULL, limit scope of ordering to this * node (only used when interleaving instances) * \param[in] flags Action flags for \p first for ordering purposes * \param[in] filter Action flags to limit scope of certain updates * (may include pe_action_optional to affect only * mandatory actions, and pe_action_runnable to * affect only runnable actions) * \param[in] type Group of enum pe_ordering flags to apply * \param[in,out] data_set Cluster working set * * \return Group of enum pcmk__updated flags indicating what was updated */ uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set); /*! * \internal * \brief Output a summary of scheduled actions for a resource * * \param[in,out] rsc Resource to output actions for */ void (*output_actions)(pe_resource_t *rsc); /*! * \internal * \brief Add a resource's actions to the transition graph * * \param[in,out] rsc Resource whose actions should be added */ void (*add_actions_to_graph)(pe_resource_t *rsc); /*! * \internal * \brief Add meta-attributes relevant to transition graph actions to XML * * If a given resource supports variant-specific meta-attributes that are * needed for transition graph actions, add them to a given XML element.
* * \param[in] rsc Resource whose meta-attributes should be added * \param[in,out] xml Transition graph action attributes XML to add to */ void (*add_graph_meta)(const pe_resource_t *rsc, xmlNode *xml); /*! * \internal * \brief Add a resource's utilization to a table of utilization values * * This function is used when summing the utilization of a resource and all * resources colocated with it, to determine whether a node has sufficient * capacity. Given a resource and a table of utilization values, it will add * the resource's utilization to the existing values, if the resource has * not yet been allocated to a node. * * \param[in] rsc Resource with utilization to add * \param[in] orig_rsc Resource being allocated (for logging only) * \param[in] all_rscs List of all resources that will be summed * \param[in,out] utilization Table of utilization values to add to */ void (*add_utilization)(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); /*! * \internal * \brief Apply a shutdown lock for a resource, if appropriate * * \param[in,out] rsc Resource to check for shutdown lock */ void (*shutdown_lock)(pe_resource_t *rsc); }; // Actions (pcmk_sched_actions.c) G_GNUC_INTERNAL void pcmk__update_action_for_orderings(pe_action_t *action, pe_working_set_t *data_set); G_GNUC_INTERNAL uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details); G_GNUC_INTERNAL pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name, guint interval_ms, const pe_node_t *node); G_GNUC_INTERNAL pe_action_t *pcmk__new_shutdown_action(pe_node_t *node); G_GNUC_INTERNAL bool pcmk__action_locks_rsc_to_node(const pe_action_t *action); G_GNUC_INTERNAL void pcmk__deduplicate_action_inputs(pe_action_t *action); G_GNUC_INTERNAL void pcmk__output_actions(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node, const xmlNode *xml_op); G_GNUC_INTERNAL void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set); // Recurring actions (pcmk_sched_recurring.c) G_GNUC_INTERNAL void pcmk__create_recurring_actions(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task, guint interval_ms, const pe_node_t *node, const char *reason); G_GNUC_INTERNAL void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node); G_GNUC_INTERNAL bool pcmk__action_is_recurring(const pe_action_t *action); // Producing transition graphs (pcmk_graph_producer.c) G_GNUC_INTERNAL bool pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action, pe_action_wrapper_t *input); G_GNUC_INTERNAL void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__create_graph(pe_working_set_t *data_set); // Fencing (pcmk_sched_fencing.c) G_GNUC_INTERNAL void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set); G_GNUC_INTERNAL -void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node, +void pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, enum pe_ordering order); G_GNUC_INTERNAL void pcmk__fence_guest(pe_node_t *node); G_GNUC_INTERNAL -bool pcmk__node_unfenced(pe_node_t *node); +bool pcmk__node_unfenced(const pe_node_t *node); 
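/* A minimal usage sketch (illustrative only, not part of this patch) of the
 * method table declared in struct resource_alloc_functions_s above: each
 * variant (primitive, group, clone, bundle) supplies its own table, and
 * pcmk__set_allocation_methods() points each resource's rsc->cmds at the
 * appropriate one, so callers dispatch without knowing the variant. Assuming
 * rsc->cmds has been set that way, an assignment pass could look like:
 *
 *     static void
 *     assign_all_resources(pe_working_set_t *data_set)
 *     {
 *         for (GList *iter = data_set->resources; iter != NULL;
 *              iter = iter->next) {
 *             pe_resource_t *rsc = iter->data;
 *
 *             // Dispatch to the variant-appropriate implementation,
 *             // with no preferred node
 *             rsc->cmds->assign(rsc, NULL);
 *         }
 *     }
 */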
G_GNUC_INTERNAL void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data); // Injected scheduler inputs (pcmk_sched_injections.c) void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib, const pcmk_injections_t *injections); // Constraints of any type (pcmk_sched_constraints.c) G_GNUC_INTERNAL pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id); G_GNUC_INTERNAL xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__valid_resource_or_tag(const pe_working_set_t *data_set, const char *id, pe_resource_t **rsc, pe_tag_t **tag); G_GNUC_INTERNAL bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr, bool convert_rsc, const pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__create_internal_constraints(pe_working_set_t *data_set); // Location constraints G_GNUC_INTERNAL void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc, int node_weight, const char *discover_mode, pe_node_t *foo_node, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__apply_locations(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint); // Colocation constraints (pcmk_sched_colocation.c) enum pcmk__coloc_affects { pcmk__coloc_affects_nothing = 0, pcmk__coloc_affects_location, pcmk__coloc_affects_role, }; G_GNUC_INTERNAL enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool preview); G_GNUC_INTERNAL void pcmk__apply_coloc_to_weights(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__apply_coloc_to_priority(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id, GHashTable **nodes, const char *attr, float factor, uint32_t flags); G_GNUC_INTERNAL void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__add_this_with(pe_resource_t *rsc, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__add_with_this(pe_resource_t *rsc, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__new_colocation(const char *id, const char *node_attr, int score, pe_resource_t *dependent, pe_resource_t *primary, const char *dependent_role, const char *primary_role, bool influence, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__block_colocation_dependents(pe_action_t *action, pe_working_set_t *data_set); /*! * \internal * \brief Check whether a colocation's dependent preferences should be considered * * \param[in] colocation Colocation constraint * \param[in] rsc Primary instance (a NULL value is treated as * colocation->primary, the usual case, but for clones * or bundles with multiple instances this can be a * particular instance) * * \return true if colocation influence should be effective, otherwise false */ static inline bool pcmk__colocation_has_influence(const pcmk__colocation_t *colocation, const pe_resource_t *rsc) { if (rsc == NULL) { rsc = colocation->primary; } /* A bundle replica colocates its remote connection with its container, * using a finite score so that the container can run on Pacemaker Remote * nodes.
* * Moving a connection is lightweight and does not interrupt the service, * while moving a container is heavyweight and does interrupt the service, * so don't move a clean, active container based solely on the preferences * of its connection. * * This also avoids problematic scenarios where two containers want to * perpetually swap places. */ if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes) && !pcmk_is_set(rsc->flags, pe_rsc_failed) && pcmk__list_of_1(rsc->running_on)) { return false; } /* The dependent in a colocation influences the primary's location * if the influence option is true or the primary is not yet active. */ return colocation->influence || (rsc->running_on == NULL); } // Ordering constraints (pcmk_sched_ordering.c) G_GNUC_INTERNAL void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task, pe_action_t *first_action, pe_resource_t *then_rsc, char *then_task, pe_action_t *then_action, uint32_t flags, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__disable_invalid_orderings(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op); G_GNUC_INTERNAL void pcmk__apply_orderings(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__order_after_each(pe_action_t *after, GList *list); /*! * \internal * \brief Create a new ordering between two resource actions * * \param[in,out] first_rsc Resource for 'first' action * \param[in,out] first_task Action key for 'first' action * \param[in] then_rsc Resource for 'then' action * \param[in,out] then_task Action key for 'then' action * \param[in] flags Bitmask of enum pe_ordering flags */ #define pcmk__order_resource_actions(first_rsc, first_task, \ then_rsc, then_task, flags) \ pcmk__new_ordering((first_rsc), \ pcmk__op_key((first_rsc)->id, (first_task), 0), \ NULL, \ (then_rsc), \ pcmk__op_key((then_rsc)->id, (then_task), 0), \ NULL, (flags), (first_rsc)->cluster) #define pcmk__order_starts(rsc1, rsc2, flags) \ pcmk__order_resource_actions((rsc1), CRMD_ACTION_START, \ (rsc2), CRMD_ACTION_START, (flags)) #define pcmk__order_stops(rsc1, rsc2, flags) \ pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP, \ (rsc2), CRMD_ACTION_STOP, (flags)) // Ticket constraints (pcmk_sched_tickets.c) G_GNUC_INTERNAL void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set); // Promotable clone resources (pcmk_sched_promotable.c) G_GNUC_INTERNAL void pcmk__add_promotion_scores(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__require_promotion_tickets(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__set_instance_roles(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__create_promotable_actions(pe_resource_t *clone); G_GNUC_INTERNAL void pcmk__promotable_restart_ordering(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__order_promotable_instances(pe_resource_t *clone); G_GNUC_INTERNAL void pcmk__update_dependent_with_promotable(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation); G_GNUC_INTERNAL void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation); // Pacemaker Remote nodes (pcmk_sched_remote.c) G_GNUC_INTERNAL bool pcmk__is_failed_remote_node(const pe_node_t *node); G_GNUC_INTERNAL void pcmk__order_remote_connection_actions(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__rsc_corresponds_to_guest(const pe_resource_t 
*rsc, const pe_node_t *node); G_GNUC_INTERNAL pe_node_t *pcmk__connection_host_for_action(const pe_action_t *action); G_GNUC_INTERNAL void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params); G_GNUC_INTERNAL void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action); // Primitives (pcmk_sched_primitive.c) G_GNUC_INTERNAL pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer); G_GNUC_INTERNAL void pcmk__primitive_create_actions(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__primitive_internal_constraints(pe_resource_t *rsc); G_GNUC_INTERNAL enum pe_action_flags pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node); G_GNUC_INTERNAL void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); G_GNUC_INTERNAL void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional); G_GNUC_INTERNAL void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml); G_GNUC_INTERNAL void pcmk__primitive_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); G_GNUC_INTERNAL void pcmk__primitive_shutdown_lock(pe_resource_t *rsc); // Groups (pcmk_sched_group.c) G_GNUC_INTERNAL pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer); G_GNUC_INTERNAL void pcmk__group_create_actions(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__group_internal_constraints(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__group_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); G_GNUC_INTERNAL void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location); G_GNUC_INTERNAL enum pe_action_flags pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node); G_GNUC_INTERNAL uint32_t pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set); G_GNUC_INTERNAL GList *pcmk__group_colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs); G_GNUC_INTERNAL void pcmk__group_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization); G_GNUC_INTERNAL void pcmk__group_shutdown_lock(pe_resource_t *rsc); // Clones (pcmk_sched_clone.c) G_GNUC_INTERNAL void pcmk__clone_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); // Bundles (pcmk_sched_bundle.c) G_GNUC_INTERNAL void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent); G_GNUC_INTERNAL void pcmk__output_bundle_actions(pe_resource_t *rsc); // Injections (pcmk_injections.c) G_GNUC_INTERNAL xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid); G_GNUC_INTERNAL xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up); G_GNUC_INTERNAL xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *lrm_name, const char *rclass, const char *rtype, const char *rprovider); G_GNUC_INTERNAL void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *task, guint interval_ms, int rc); G_GNUC_INTERNAL 
xmlNode *pcmk__inject_action_result(xmlNode *cib_resource, lrmd_event_data_t *op, int target_rc); // Nodes (pcmk_sched_nodes.c) G_GNUC_INTERNAL bool pcmk__node_available(const pe_node_t *node, bool consider_score, bool consider_guest); G_GNUC_INTERNAL bool pcmk__any_node_available(GHashTable *nodes); G_GNUC_INTERNAL GHashTable *pcmk__copy_node_table(GHashTable *nodes); G_GNUC_INTERNAL GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node); G_GNUC_INTERNAL void pcmk__apply_node_health(pe_working_set_t *data_set); G_GNUC_INTERNAL pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node); // Functions applying to more than one variant (pcmk_sched_resource.c) G_GNUC_INTERNAL void pcmk__set_allocation_methods(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_entry, bool active_on_node); G_GNUC_INTERNAL -GList *pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set); +GList *pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set); G_GNUC_INTERNAL GList *pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs); G_GNUC_INTERNAL void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml); G_GNUC_INTERNAL void pcmk__output_resource_actions(pe_resource_t *rsc); G_GNUC_INTERNAL bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, bool force); G_GNUC_INTERNAL bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force); G_GNUC_INTERNAL void pcmk__unassign_resource(pe_resource_t *rsc); G_GNUC_INTERNAL -bool pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node, +bool pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node, pe_resource_t **failed); G_GNUC_INTERNAL void pcmk__sort_resources(pe_working_set_t *data_set); G_GNUC_INTERNAL gint pcmk__cmp_instance(gconstpointer a, gconstpointer b); G_GNUC_INTERNAL gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b); // Functions related to probes (pcmk_sched_probes.c) G_GNUC_INTERNAL bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node); G_GNUC_INTERNAL void pcmk__order_probes(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node); G_GNUC_INTERNAL void pcmk__schedule_probes(pe_working_set_t *data_set); // Functions related to live migration (pcmk_sched_migration.c) void pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current); void pcmk__abort_dangling_migration(void *data, void *user_data); bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current); void pcmk__order_migration_equivalents(pe__ordering_t *order); // Functions related to node utilization (pcmk_sched_utilization.c) G_GNUC_INTERNAL int pcmk__compare_node_capacities(const pe_node_t *node1, const pe_node_t *node2); G_GNUC_INTERNAL void pcmk__consume_node_capacity(GHashTable *current_utilization, const pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__release_node_capacity(GHashTable *current_utilization, const pe_resource_t *rsc); G_GNUC_INTERNAL const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__create_utilization_constraints(pe_resource_t *rsc, const GList *allowed_nodes); G_GNUC_INTERNAL void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set); #endif // PCMK__LIBPACEMAKER_PRIVATE__H diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c index 
8ab9981a0a..073d0b5fa9 100644 --- a/lib/pacemaker/pcmk_output.c +++ b/lib/pacemaker/pcmk_output.c @@ -1,2315 +1,2315 @@ /* - * Copyright 2019-2022 the Pacemaker project contributors + * Copyright 2019-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include static char * colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons, bool dependents) { char *retval = NULL; if (cons->primary_role > RSC_ROLE_STARTED) { retval = crm_strdup_printf("%s (score=%s, %s role=%s, id=%s)", rsc->id, pcmk_readable_score(cons->score), (dependents? "needs" : "with"), role2text(cons->primary_role), cons->id); } else { retval = crm_strdup_printf("%s (score=%s, id=%s)", rsc->id, pcmk_readable_score(cons->score), cons->id); } return retval; } static void colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc, pcmk__colocation_t *cons) { xmlNodePtr node = NULL; node = pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_DEPEND, "id", cons->id, "rsc", cons->dependent->id, "with-rsc", cons->primary->id, "score", pcmk_readable_score(cons->score), NULL); if (cons->node_attribute) { xmlSetProp(node, (pcmkXmlStr) "node-attribute", (pcmkXmlStr) cons->node_attribute); } if (cons->dependent_role != RSC_ROLE_UNKNOWN) { xmlSetProp(node, (pcmkXmlStr) "rsc-role", (pcmkXmlStr) role2text(cons->dependent_role)); } if (cons->primary_role != RSC_ROLE_UNKNOWN) { xmlSetProp(node, (pcmkXmlStr) "with-rsc-role", (pcmkXmlStr) role2text(cons->primary_role)); } } static int do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header) { GList *lpc = NULL; GList *list = rsc->rsc_location; int rc = pcmk_rc_no_output; for (lpc = list; lpc != NULL; lpc = lpc->next) { pe__location_t *cons = lpc->data; GList *lpc2 = NULL; for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { pe_node_t *node = (pe_node_t *) lpc2->data; if (add_header) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "locations"); } pcmk__output_create_xml_node(out, XML_CONS_TAG_RSC_LOCATION, "node", node->details->uname, "rsc", rsc->id, "id", cons->id, "score", pcmk_readable_score(node->weight), NULL); } } if (add_header) { PCMK__OUTPUT_LIST_FOOTER(out, rc); } return rc; } PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *", "pe_node_t *", "pe_node_t *", "pe_action_t *", "pe_action_t *") static int rsc_action_item(pcmk__output_t *out, va_list args) { const char *change = va_arg(args, const char *); pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_node_t *origin = va_arg(args, pe_node_t *); pe_node_t *destination = va_arg(args, pe_node_t *); pe_action_t *action = va_arg(args, pe_action_t *); pe_action_t *source = va_arg(args, pe_action_t *); int len = 0; char *reason = NULL; char *details = NULL; bool same_host = false; bool same_role = false; bool need_role = false; static int rsc_width = 5; static int detail_width = 5; CRM_ASSERT(action); CRM_ASSERT(destination != NULL || origin != NULL); if(source == NULL) { source = action; } len = strlen(rsc->id); if(len > rsc_width) { rsc_width = len + 2; } if ((rsc->role > RSC_ROLE_STARTED) || (rsc->next_role > RSC_ROLE_UNPROMOTED)) { need_role = true; } if(origin != NULL && destination != NULL && origin->details == destination->details) { same_host = true; } if(rsc->role == rsc->next_role) { same_role = 
true; } if (need_role && (origin == NULL)) { /* Starting and promoting a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), pe__node_name(destination)); } else if (origin == NULL) { /* Starting a resource */ details = crm_strdup_printf("%s", pe__node_name(destination)); } else if (need_role && (destination == NULL)) { /* Stopping a promotable clone instance */ details = crm_strdup_printf("%s %s", role2text(rsc->role), pe__node_name(origin)); } else if (destination == NULL) { /* Stopping a resource */ details = crm_strdup_printf("%s", pe__node_name(origin)); } else if (need_role && same_role && same_host) { /* Recovering, restarting or re-promoting a promotable clone instance */ details = crm_strdup_printf("%s %s", role2text(rsc->role), pe__node_name(origin)); } else if (same_role && same_host) { /* Recovering or Restarting a normal resource */ details = crm_strdup_printf("%s", pe__node_name(origin)); } else if (need_role && same_role) { /* Moving a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", pe__node_name(origin), pe__node_name(destination), role2text(rsc->role)); } else if (same_role) { /* Moving a normal resource */ details = crm_strdup_printf("%s -> %s", pe__node_name(origin), pe__node_name(destination)); } else if (same_host) { /* Promoting or demoting a promotable clone instance */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), pe__node_name(origin)); } else { /* Moving and promoting/demoting */ details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), pe__node_name(origin), role2text(rsc->next_role), pe__node_name(destination)); } len = strlen(details); if(len > detail_width) { detail_width = len; } if(source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) { reason = crm_strdup_printf("due to %s (blocked)", source->reason); } else if(source->reason) { reason = crm_strdup_printf("due to %s", source->reason); } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { reason = strdup("blocked"); } out->list_item(out, NULL, "%-8s %-*s ( %*s )%s%s", change, rsc_width, rsc->id, detail_width, details, reason ? " " : "", reason ? 
reason : ""); free(details); free(reason); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *", "pe_node_t *", "pe_node_t *", "pe_action_t *", "pe_action_t *") static int rsc_action_item_xml(pcmk__output_t *out, va_list args) { const char *change = va_arg(args, const char *); pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_node_t *origin = va_arg(args, pe_node_t *); pe_node_t *destination = va_arg(args, pe_node_t *); pe_action_t *action = va_arg(args, pe_action_t *); pe_action_t *source = va_arg(args, pe_action_t *); char *change_str = NULL; bool same_host = false; bool same_role = false; bool need_role = false; xmlNode *xml = NULL; CRM_ASSERT(action); CRM_ASSERT(destination != NULL || origin != NULL); if (source == NULL) { source = action; } if ((rsc->role > RSC_ROLE_STARTED) || (rsc->next_role > RSC_ROLE_UNPROMOTED)) { need_role = true; } if(origin != NULL && destination != NULL && origin->details == destination->details) { same_host = true; } if(rsc->role == rsc->next_role) { same_role = true; } change_str = g_ascii_strdown(change, -1); xml = pcmk__output_create_xml_node(out, "rsc_action", "action", change_str, "resource", rsc->id, NULL); g_free(change_str); if (need_role && (origin == NULL)) { /* Starting and promoting a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "next-role", role2text(rsc->next_role), "dest", destination->details->uname, NULL); } else if (origin == NULL) { /* Starting a resource */ crm_xml_add(xml, "node", destination->details->uname); } else if (need_role && (destination == NULL)) { /* Stopping a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "node", origin->details->uname, NULL); } else if (destination == NULL) { /* Stopping a resource */ crm_xml_add(xml, "node", origin->details->uname); } else if (need_role && same_role && same_host) { /* Recovering, restarting or re-promoting a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "source", origin->details->uname, NULL); } else if (same_role && same_host) { /* Recovering or Restarting a normal resource */ crm_xml_add(xml, "source", origin->details->uname); } else if (need_role && same_role) { /* Moving a promotable clone instance */ pcmk__xe_set_props(xml, "source", origin->details->uname, "dest", destination->details->uname, "role", role2text(rsc->role), NULL); } else if (same_role) { /* Moving a normal resource */ pcmk__xe_set_props(xml, "source", origin->details->uname, "dest", destination->details->uname, NULL); } else if (same_host) { /* Promoting or demoting a promotable clone instance */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "next-role", role2text(rsc->next_role), "source", origin->details->uname, NULL); } else { /* Moving and promoting/demoting */ pcmk__xe_set_props(xml, "role", role2text(rsc->role), "source", origin->details->uname, "next-role", role2text(rsc->next_role), "dest", destination->details->uname, NULL); } if (source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) { pcmk__xe_set_props(xml, "reason", source->reason, "blocked", "true", NULL); } else if(source->reason) { crm_xml_add(xml, "reason", source->reason); } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { pcmk__xe_set_bool_attr(xml, "blocked", true); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool") static int rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = 
va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) { return rc; } pe__set_resource_flags(rsc, pe_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; char *hdr = NULL; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources %s is colocated with", rsc->id); if (pcmk_is_set(cons->primary->flags, pe_rsc_detect_loop)) { out->list_item(out, NULL, "%s (id=%s - loop)", cons->primary->id, cons->id); continue; } hdr = colocations_header(cons->primary, cons, false); out->list_item(out, NULL, "%s", hdr); free(hdr); /* Empty list header just for indentation of information about this resource. */ out->begin_list(out, NULL, NULL, NULL); out->message(out, "locations-list", cons->primary); if (recursive) { out->message(out, "rsc-is-colocated-with-list", cons->primary, recursive); } out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool") static int rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) { return rc; } pe__set_resource_flags(rsc, pe_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; if (pcmk_is_set(cons->primary->flags, pe_rsc_detect_loop)) { colocations_xml_node(out, cons->primary, cons); continue; } colocations_xml_node(out, cons->primary, cons); do_locations_list_xml(out, cons->primary, false); if (recursive) { out->message(out, "rsc-is-colocated-with-list", cons->primary, recursive); } } return rc; } PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool") static int rscs_colocated_with_list(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) { return rc; } pe__set_resource_flags(rsc, pe_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; char *hdr = NULL; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources colocated with %s", rsc->id); if (pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)) { out->list_item(out, NULL, "%s (id=%s - loop)", cons->dependent->id, cons->id); continue; } hdr = colocations_header(cons->dependent, cons, true); out->list_item(out, NULL, "%s", hdr); free(hdr); /* Empty list header just for indentation of information about this resource. 
*/ out->begin_list(out, NULL, NULL, NULL); out->message(out, "locations-list", cons->dependent); if (recursive) { out->message(out, "rscs-colocated-with-list", cons->dependent, recursive); } out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool") static int rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) { return rc; } pe__set_resource_flags(rsc, pe_rsc_detect_loop); for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; if (pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)) { colocations_xml_node(out, cons->dependent, cons); continue; } colocations_xml_node(out, cons->dependent, cons); do_locations_list_xml(out, cons->dependent, false); if (recursive) { out->message(out, "rscs-colocated-with-list", cons->dependent, recursive); } } return rc; } PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") static int locations_list(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); GList *lpc = NULL; GList *list = rsc->rsc_location; int rc = pcmk_rc_no_output; for (lpc = list; lpc != NULL; lpc = lpc->next) { pe__location_t *cons = lpc->data; GList *lpc2 = NULL; for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) { pe_node_t *node = (pe_node_t *) lpc2->data; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Locations"); out->list_item(out, NULL, "Node %s (score=%s, id=%s, rsc=%s)", pe__node_name(node), pcmk_readable_score(node->weight), cons->id, rsc->id); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *") static int locations_list_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); return do_locations_list_xml(out, rsc, true); } PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *", "pe_working_set_t *", "bool", "bool") static int locations_and_colocations(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); bool recursive = va_arg(args, int); bool force = va_arg(args, int); pcmk__unpack_constraints(data_set); // Constraints apply to group/clone, not member/instance if (!force) { rsc = uber_parent(rsc); } out->message(out, "locations-list", rsc); pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop); out->message(out, "rscs-colocated-with-list", rsc, recursive); pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop); out->message(out, "rsc-is-colocated-with-list", rsc, recursive); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *", "pe_working_set_t *", "bool", "bool") static int locations_and_colocations_xml(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); bool recursive = va_arg(args, int); bool force = va_arg(args, int); pcmk__unpack_constraints(data_set); // Constraints apply to group/clone, not member/instance if (!force) { rsc = uber_parent(rsc); } pcmk__output_xml_create_parent(out, "constraints", NULL); do_locations_list_xml(out, rsc, false); pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop); out->message(out, "rscs-colocated-with-list", 
rsc, recursive); pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop); out->message(out, "rsc-is-colocated-with-list", rsc, recursive); pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health(pcmk__output_t *out, va_list args) { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); const char *host_from = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result = va_arg(args, const char *); return out->info(out, "Controller on %s in state %s: %s", pcmk__s(host_from, "unknown node"), pcmk__s(fsa_state, "unknown"), pcmk__s(result, "unknown result")); } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return health(out, args); } else { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); const char *host_from G_GNUC_UNUSED = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result G_GNUC_UNUSED = va_arg(args, const char *); if (fsa_state != NULL) { pcmk__formatted_printf(out, "%s\n", fsa_state); return pcmk_rc_ok; } } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health_xml(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); const char *host_from = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result = va_arg(args, const char *); pcmk__output_create_xml_node(out, pcmk__s(sys_from, ""), "node_name", pcmk__s(host_from, ""), "state", pcmk__s(fsa_state, ""), "result", pcmk__s(result, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "long long") static int pacemakerd_health(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = (time_t) va_arg(args, long long); char *last_updated_s = NULL; int rc = pcmk_rc_ok; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = "pacemaker-remoted"; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk__pcmkd_state_enum2friendly(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } rc = out->info(out, "Status of %s: '%s' (last updated %s)", sys_from, state_s, pcmk__s(last_updated_s, "at unknown time")); free(last_updated_s); return rc; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "long long") static int pacemakerd_health_html(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = (time_t) va_arg(args, long long); char *last_updated_s = NULL; char *msg = NULL; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = "pacemaker-remoted"; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk__pcmkd_state_enum2friendly(state); } if (last_updated != 
0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } msg = crm_strdup_printf("Status of %s: '%s' (last updated %s)", sys_from, state_s, pcmk__s(last_updated_s, "at unknown time")); pcmk__output_create_html_node(out, "li", NULL, NULL, msg); free(msg); free(last_updated_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "long long") static int pacemakerd_health_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return pacemakerd_health(out, args); } else { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated G_GNUC_UNUSED = (time_t) va_arg(args, long long); if (state_s == NULL) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state); } pcmk__formatted_printf(out, "%s\n", state_s); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "long long") static int pacemakerd_health_xml(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = (time_t) va_arg(args, long long); char *last_updated_s = NULL; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = "pacemaker-remoted"; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } pcmk__output_create_xml_node(out, "pacemakerd", "sys_from", sys_from, "state", state_s, "last_updated", last_updated_s, NULL); free(last_updated_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("profile", "const char *", "clock_t", "clock_t") static int profile_default(pcmk__output_t *out, va_list args) { const char *xml_file = va_arg(args, const char *); clock_t start = va_arg(args, clock_t); clock_t end = va_arg(args, clock_t); out->list_item(out, NULL, "Testing %s ... 
%.2f secs", xml_file, (end - start) / (float) CLOCKS_PER_SEC); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("profile", "const char *", "clock_t", "clock_t") static int profile_xml(pcmk__output_t *out, va_list args) { const char *xml_file = va_arg(args, const char *); clock_t start = va_arg(args, clock_t); clock_t end = va_arg(args, clock_t); char *duration = pcmk__ftoa((end - start) / (float) CLOCKS_PER_SEC); pcmk__output_create_xml_node(out, "timing", "file", xml_file, "duration", duration, NULL); free(duration); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc(pcmk__output_t *out, va_list args) { const char *dc = va_arg(args, const char *); return out->info(out, "Designated Controller is: %s", pcmk__s(dc, "not yet elected")); } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return dc(out, args); } else { const char *dc = va_arg(args, const char *); if (dc != NULL) { pcmk__formatted_printf(out, "%s\n", pcmk__s(dc, "")); return pcmk_rc_ok; } } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc_xml(pcmk__output_t *out, va_list args) { const char *dc = va_arg(args, const char *); pcmk__output_create_xml_node(out, "dc", "node_name", pcmk__s(dc, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node(pcmk__output_t *out, va_list args) { const char *type = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id = va_arg(args, const char *); bool bash_export = va_arg(args, int); if (bash_export) { return out->info(out, "export %s=%s", pcmk__s(name, ""), pcmk__s(id, "")); } else { return out->info(out, "%s node: %s (%s)", type ? type : "cluster", pcmk__s(name, ""), pcmk__s(id, "")); } } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return crmadmin_node(out, args); } else { const char *type G_GNUC_UNUSED = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id G_GNUC_UNUSED = va_arg(args, const char *); bool bash_export G_GNUC_UNUSED = va_arg(args, int); pcmk__formatted_printf(out, "%s\n", pcmk__s(name, "")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node_xml(pcmk__output_t *out, va_list args) { const char *type = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id = va_arg(args, const char *); bool bash_export G_GNUC_UNUSED = va_arg(args, int); pcmk__output_create_xml_node(out, "node", "type", type ? 
type : "cluster", "name", pcmk__s(name, ""), "id", pcmk__s(id, ""), NULL); return pcmk_rc_ok; } -PCMK__OUTPUT_ARGS("digests", "pe_resource_t *", "pe_node_t *", "const char *", - "guint", "op_digest_cache_t *") +PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *", + "const char *", "guint", "const op_digest_cache_t *") static int digests_text(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - pe_node_t *node = va_arg(args, pe_node_t *); + const pe_resource_t *rsc = va_arg(args, const pe_resource_t *); + const pe_node_t *node = va_arg(args, const pe_node_t *); const char *task = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); - op_digest_cache_t *digests = va_arg(args, op_digest_cache_t *); + const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *); char *action_desc = NULL; const char *rsc_desc = "unknown resource"; const char *node_desc = "unknown node"; if (interval_ms != 0) { action_desc = crm_strdup_printf("%ums-interval %s action", interval_ms, ((task == NULL)? "unknown" : task)); } else if (pcmk__str_eq(task, "monitor", pcmk__str_none)) { action_desc = strdup("probe action"); } else { action_desc = crm_strdup_printf("%s action", ((task == NULL)? "unknown" : task)); } if ((rsc != NULL) && (rsc->id != NULL)) { rsc_desc = rsc->id; } if ((node != NULL) && (node->details->uname != NULL)) { node_desc = node->details->uname; } out->begin_list(out, NULL, NULL, "Digests for %s %s on %s", rsc_desc, action_desc, node_desc); free(action_desc); if (digests == NULL) { out->list_item(out, NULL, "none"); out->end_list(out); return pcmk_rc_ok; } if (digests->digest_all_calc != NULL) { out->list_item(out, NULL, "%s (all parameters)", digests->digest_all_calc); } if (digests->digest_secure_calc != NULL) { out->list_item(out, NULL, "%s (non-private parameters)", digests->digest_secure_calc); } if (digests->digest_restart_calc != NULL) { out->list_item(out, NULL, "%s (non-reloadable parameters)", digests->digest_restart_calc); } out->end_list(out); return pcmk_rc_ok; } static void add_digest_xml(xmlNode *parent, const char *type, const char *digest, xmlNode *digest_source) { if (digest != NULL) { xmlNodePtr digest_xml = create_xml_node(parent, "digest"); crm_xml_add(digest_xml, "type", ((type == NULL)? 
"unspecified" : type)); crm_xml_add(digest_xml, "hash", digest); if (digest_source != NULL) { add_node_copy(digest_xml, digest_source); } } } -PCMK__OUTPUT_ARGS("digests", "pe_resource_t *", "pe_node_t *", "const char *", - "guint", "op_digest_cache_t *") +PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *", + "const char *", "guint", "const op_digest_cache_t *") static int digests_xml(pcmk__output_t *out, va_list args) { - pe_resource_t *rsc = va_arg(args, pe_resource_t *); - pe_node_t *node = va_arg(args, pe_node_t *); + const pe_resource_t *rsc = va_arg(args, const pe_resource_t *); + const pe_node_t *node = va_arg(args, const pe_node_t *); const char *task = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); - op_digest_cache_t *digests = va_arg(args, op_digest_cache_t *); + const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *); char *interval_s = crm_strdup_printf("%ums", interval_ms); xmlNode *xml = NULL; xml = pcmk__output_create_xml_node(out, "digests", "resource", pcmk__s(rsc->id, ""), "node", pcmk__s(node->details->uname, ""), "task", pcmk__s(task, ""), "interval", interval_s, NULL); free(interval_s); if (digests != NULL) { add_digest_xml(xml, "all", digests->digest_all_calc, digests->params_all); add_digest_xml(xml, "nonprivate", digests->digest_secure_calc, digests->params_secure); add_digest_xml(xml, "nonreloadable", digests->digest_restart_calc, digests->params_restart); } return pcmk_rc_ok; } #define STOP_SANITY_ASSERT(lineno) do { \ if(current && current->details->unclean) { \ /* It will be a pseudo op */ \ } else if(stop == NULL) { \ crm_err("%s:%d: No stop action exists for %s", \ __func__, lineno, rsc->id); \ CRM_ASSERT(stop != NULL); \ } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \ crm_err("%s:%d: Action %s is still optional", \ __func__, lineno, stop->uuid); \ CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \ } \ } while(0) PCMK__OUTPUT_ARGS("rsc-action", "pe_resource_t *", "pe_node_t *", "pe_node_t *") static int rsc_action_default(pcmk__output_t *out, va_list args) { pe_resource_t *rsc = va_arg(args, pe_resource_t *); pe_node_t *current = va_arg(args, pe_node_t *); pe_node_t *next = va_arg(args, pe_node_t *); GList *possible_matches = NULL; char *key = NULL; int rc = pcmk_rc_no_output; bool moving = false; pe_node_t *start_node = NULL; pe_action_t *start = NULL; pe_action_t *stop = NULL; pe_action_t *promote = NULL; pe_action_t *demote = NULL; if (!pcmk_is_set(rsc->flags, pe_rsc_managed) || (current == NULL && next == NULL)) { pe_rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, role2text(rsc->role), !pcmk_is_set(rsc->flags, pe_rsc_managed)? " unmanaged" : ""); return rc; } moving = (current != NULL) && (next != NULL) && (current->details != next->details); possible_matches = pe__resource_actions(rsc, next, RSC_START, false); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) { start_node = NULL; } else { start_node = current; } possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, false); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } else if (pcmk_is_set(rsc->flags, pe_rsc_stop_unexpected)) { /* The resource is multiply active with multiple-active set to * stop_unexpected, and not stopping on its current node, but it should * be stopping elsewhere. 
*/ possible_matches = pe__resource_actions(rsc, NULL, RSC_STOP, false); if (possible_matches != NULL) { stop = possible_matches->data; g_list_free(possible_matches); } } possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, false); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, false); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->role == rsc->next_role) { pe_action_t *migrate_op = NULL; CRM_CHECK(next != NULL, return rc); possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, false); if (possible_matches) { migrate_op = possible_matches->data; } if ((migrate_op != NULL) && (current != NULL) && pcmk_is_set(migrate_op->flags, pe_action_runnable)) { rc = out->message(out, "rsc-action-item", "Migrate", rsc, current, next, start, NULL); } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) { rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next, start, NULL); } else if (start == NULL || pcmk_is_set(start->flags, pe_action_optional)) { if ((demote != NULL) && (promote != NULL) && !pcmk_is_set(demote->flags, pe_action_optional) && !pcmk_is_set(promote->flags, pe_action_optional)) { rc = out->message(out, "rsc-action-item", "Re-promote", rsc, current, next, promote, demote); } else { pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), pe__node_name(next)); } } else if (!pcmk_is_set(start->flags, pe_action_runnable)) { rc = out->message(out, "rsc-action-item", "Stop", rsc, current, NULL, stop, (stop && stop->reason)? stop : start); STOP_SANITY_ASSERT(__LINE__); } else if (moving && current) { rc = out->message(out, "rsc-action-item", pcmk_is_set(rsc->flags, pe_rsc_failed)? "Recover" : "Move", rsc, current, next, stop, NULL); } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { rc = out->message(out, "rsc-action-item", "Recover", rsc, current, NULL, stop, NULL); STOP_SANITY_ASSERT(__LINE__); } else { rc = out->message(out, "rsc-action-item", "Restart", rsc, current, next, start, NULL); /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */ } g_list_free(possible_matches); return rc; } if(stop && (rsc->next_role == RSC_ROLE_STOPPED || (start && !pcmk_is_set(start->flags, pe_action_runnable)))) { GList *gIter = NULL; key = stop_key(rsc); for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pe_action_t *stop_op = NULL; possible_matches = find_actions(rsc->actions, key, node); if (possible_matches) { stop_op = possible_matches->data; g_list_free(possible_matches); } if (stop_op && (stop_op->flags & pe_action_runnable)) { STOP_SANITY_ASSERT(__LINE__); } if (out->message(out, "rsc-action-item", "Stop", rsc, node, NULL, stop_op, (stop_op && stop_op->reason)? 
stop_op : start) == pcmk_rc_ok) { rc = pcmk_rc_ok; } } free(key); } else if ((stop != NULL) && pcmk_all_flags_set(rsc->flags, pe_rsc_failed|pe_rsc_stop)) { /* 'stop' may be NULL if the failure was ignored */ rc = out->message(out, "rsc-action-item", "Recover", rsc, current, next, stop, start); STOP_SANITY_ASSERT(__LINE__); } else if (moving) { rc = out->message(out, "rsc-action-item", "Move", rsc, current, next, stop, NULL); STOP_SANITY_ASSERT(__LINE__); } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) { rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next, start, NULL); } else if (stop != NULL && !pcmk_is_set(stop->flags, pe_action_optional)) { rc = out->message(out, "rsc-action-item", "Restart", rsc, current, next, start, NULL); STOP_SANITY_ASSERT(__LINE__); } else if (rsc->role == RSC_ROLE_PROMOTED) { CRM_LOG_ASSERT(current != NULL); rc = out->message(out, "rsc-action-item", "Demote", rsc, current, next, demote, NULL); } else if (rsc->next_role == RSC_ROLE_PROMOTED) { CRM_LOG_ASSERT(next); rc = out->message(out, "rsc-action-item", "Promote", rsc, current, next, promote, NULL); } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) { rc = out->message(out, "rsc-action-item", "Start", rsc, current, next, start, NULL); } return rc; } PCMK__OUTPUT_ARGS("node-action", "char *", "char *", "char *") static int node_action(pcmk__output_t *out, va_list args) { char *task = va_arg(args, char *); char *node_name = va_arg(args, char *); char *reason = va_arg(args, char *); if (task == NULL) { return pcmk_rc_no_output; } else if (reason) { out->list_item(out, NULL, "%s %s '%s'", task, node_name, reason); } else { crm_notice(" * %s %s", task, node_name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-action", "char *", "char *", "char *") static int node_action_xml(pcmk__output_t *out, va_list args) { char *task = va_arg(args, char *); char *node_name = va_arg(args, char *); char *reason = va_arg(args, char *); if (task == NULL) { return pcmk_rc_no_output; } else if (reason) { pcmk__output_create_xml_node(out, "node_action", "task", task, "node", node_name, "reason", reason, NULL); } else { crm_notice(" * %s %s", task, node_name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *", "const char *", "bool", "bool") static int node_info_default(pcmk__output_t *out, va_list args) { uint32_t node_id = va_arg(args, uint32_t); const char *node_name = va_arg(args, const char *); const char *uuid = va_arg(args, const char *); const char *state = va_arg(args, const char *); bool have_quorum = (bool) va_arg(args, int); bool is_remote = (bool) va_arg(args, int); return out->info(out, "Node %" PRIu32 ": %s " "(uuid=%s, state=%s, have_quorum=%s, is_remote=%s)", node_id, pcmk__s(node_name, "unknown"), pcmk__s(uuid, "unknown"), pcmk__s(state, "unknown"), pcmk__btoa(have_quorum), pcmk__btoa(is_remote)); } PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "bool", "bool", "const char *", "const char *") static int node_info_xml(pcmk__output_t *out, va_list args) { uint32_t node_id = va_arg(args, uint32_t); const char *node_name = va_arg(args, const char *); const char *uuid = va_arg(args, const char *); const char *state = va_arg(args, const char *); bool have_quorum = (bool) va_arg(args, int); bool is_remote = (bool) va_arg(args, int); char *id_s = crm_strdup_printf("%" PRIu32, node_id); pcmk__output_create_xml_node(out, "node-info", "nodeid", id_s, XML_ATTR_UNAME, node_name, XML_ATTR_ID, uuid, XML_NODE_IS_PEER, state, 
XML_ATTR_HAVE_QUORUM, pcmk__btoa(have_quorum), XML_NODE_IS_REMOTE, pcmk__btoa(is_remote), NULL); free(id_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr") static int inject_cluster_action(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr rsc = va_arg(args, xmlNodePtr); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if(rsc) { out->list_item(out, NULL, "Cluster action: %s for %s on %s", task, ID(rsc), node); } else { out->list_item(out, NULL, "Cluster action: %s on %s", task, node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr") static int inject_cluster_action_xml(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr rsc = va_arg(args, xmlNodePtr); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, "cluster_action", "task", task, "node", node, NULL); if (rsc) { crm_xml_add(xml_node, "id", ID(rsc)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-fencing-action", "char *", "const char *") static int inject_fencing_action(pcmk__output_t *out, va_list args) { char *target = va_arg(args, char *); const char *op = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Fencing %s (%s)", target, op); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-fencing-action", "char *", "const char *") static int inject_fencing_action_xml(pcmk__output_t *out, va_list args) { char *target = va_arg(args, char *); const char *op = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "fencing_action", "target", target, "op", op, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-attr", "const char *", "const char *", "xmlNodePtr") static int inject_attr(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); xmlNodePtr cib_node = va_arg(args, xmlNodePtr); xmlChar *node_path = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node_path = xmlGetNodePath(cib_node); out->list_item(out, NULL, "Injecting attribute %s=%s into %s '%s'", name, value, node_path, ID(cib_node)); free(node_path); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-attr", "const char *", "const char *", "xmlNodePtr") static int inject_attr_xml(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); xmlNodePtr cib_node = va_arg(args, xmlNodePtr); xmlChar *node_path = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node_path = xmlGetNodePath(cib_node); pcmk__output_create_xml_node(out, "inject_attr", "name", name, "value", value, "node_path", node_path, "cib_node", ID(cib_node), NULL); free(node_path); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-spec", "const char *") static int inject_spec(pcmk__output_t *out, va_list args) { const char *spec = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Injecting %s into the configuration", spec); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-spec", "const char *") static int inject_spec_xml(pcmk__output_t *out, va_list args) { const char *spec = va_arg(args, const char *); 
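/* As with the other inject_* handlers, produce no output in quiet mode */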
if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "inject_spec", "spec", spec, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-config", "char *", "char *") static int inject_modify_config(pcmk__output_t *out, va_list args) { char *quorum = va_arg(args, char *); char *watchdog = va_arg(args, char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->begin_list(out, NULL, NULL, "Performing Requested Modifications"); if (quorum) { out->list_item(out, NULL, "Setting quorum: %s", quorum); } if (watchdog) { out->list_item(out, NULL, "Setting watchdog: %s", watchdog); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-config", "char *", "char *") static int inject_modify_config_xml(pcmk__output_t *out, va_list args) { char *quorum = va_arg(args, char *); char *watchdog = va_arg(args, char *); xmlNodePtr node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node = pcmk__output_xml_create_parent(out, "modifications", NULL); if (quorum) { crm_xml_add(node, "quorum", quorum); } if (watchdog) { crm_xml_add(node, "watchdog", watchdog); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-node", "const char *", "char *") static int inject_modify_node(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); char *node = va_arg(args, char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (pcmk__str_eq(action, "Online", pcmk__str_none)) { out->list_item(out, NULL, "Bringing node %s online", node); return pcmk_rc_ok; } else if (pcmk__str_eq(action, "Offline", pcmk__str_none)) { out->list_item(out, NULL, "Taking node %s offline", node); return pcmk_rc_ok; } else if (pcmk__str_eq(action, "Failing", pcmk__str_none)) { out->list_item(out, NULL, "Failing node %s", node); return pcmk_rc_ok; } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("inject-modify-node", "const char *", "char *") static int inject_modify_node_xml(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); char *node = va_arg(args, char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "modify_node", "action", action, "node", node, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-ticket", "const char *", "char *") static int inject_modify_ticket(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); char *ticket = va_arg(args, char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (pcmk__str_eq(action, "Standby", pcmk__str_none)) { out->list_item(out, NULL, "Making ticket %s standby", ticket); } else { out->list_item(out, NULL, "%s ticket %s", action, ticket); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-ticket", "const char *", "char *") static int inject_modify_ticket_xml(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); char *ticket = va_arg(args, char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, "modify_ticket", "action", action, "ticket", ticket, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-pseudo-action", "const char *", "const char *") static int inject_pseudo_action(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Pseudo action: %s%s%s", task, node ? " on " : "", node ? 
node : ""); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-pseudo-action", "const char *", "const char *") static int inject_pseudo_action_xml(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, "pseudo_action", "task", task, NULL); if (node) { crm_xml_add(xml_node, "node", node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-rsc-action", "const char *", "const char *", "char *", "guint") static int inject_rsc_action(pcmk__output_t *out, va_list args) { const char *rsc = va_arg(args, const char *); const char *operation = va_arg(args, const char *); char *node = va_arg(args, char *); guint interval_ms = va_arg(args, guint); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (interval_ms) { out->list_item(out, NULL, "Resource action: %-15s %s=%u on %s", rsc, operation, interval_ms, node); } else { out->list_item(out, NULL, "Resource action: %-15s %s on %s", rsc, operation, node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-rsc-action", "const char *", "const char *", "char *", "guint") static int inject_rsc_action_xml(pcmk__output_t *out, va_list args) { const char *rsc = va_arg(args, const char *); const char *operation = va_arg(args, const char *); char *node = va_arg(args, char *); guint interval_ms = va_arg(args, guint); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, "rsc_action", "resource", rsc, "op", operation, "node", node, NULL); if (interval_ms) { char *interval_s = pcmk__itoa(interval_ms); crm_xml_add(xml_node, "interval", interval_s); free(interval_s); } return pcmk_rc_ok; } #define CHECK_RC(retcode, retval) \ if (retval == pcmk_rc_ok) { \ retcode = pcmk_rc_ok; \ } PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") int pcmk__cluster_status_text(pcmk__output_t *out, va_list args) { pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); int rc = pcmk_rc_no_output; bool already_printed_failure = false; CHECK_RC(rc, out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts, show_opts)); if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) { CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames, resources, show_opts, rc == pcmk_rc_ok)); } /* Print resources section, if needed */ if (pcmk_is_set(section_opts, pcmk_section_resources)) { CHECK_RC(rc, out->message(out, "resource-list", data_set, show_opts, true, unames, resources, rc == pcmk_rc_ok)); } /* print Node Attributes section if requested */ if (pcmk_is_set(section_opts, pcmk_section_attributes)) { CHECK_RC(rc, out->message(out, "node-attribute-list", data_set, show_opts, rc == pcmk_rc_ok, unames, resources)); } /* If requested, print resource 
operations (which includes failcounts) * or just failcounts */ if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) { CHECK_RC(rc, out->message(out, "node-summary", data_set, unames, resources, section_opts, show_opts, rc == pcmk_rc_ok)); } /* If there were any failed actions, print them */ if (pcmk_is_set(section_opts, pcmk_section_failures) && xml_has_children(data_set->failed)) { CHECK_RC(rc, out->message(out, "failed-action-list", data_set, unames, resources, show_opts, rc == pcmk_rc_ok)); } /* Print failed stonith actions */ if (pcmk_is_set(section_opts, pcmk_section_fence_failed) && fence_history != pcmk__fence_history_none) { if (history_rc == 0) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } else { PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); already_printed_failure = true; } } /* Print tickets if requested */ if (pcmk_is_set(section_opts, pcmk_section_tickets)) { CHECK_RC(rc, out->message(out, "ticket-list", data_set, rc == pcmk_rc_ok)); } /* Print negative location constraints if requested */ if (pcmk_is_set(section_opts, pcmk_section_bans)) { CHECK_RC(rc, out->message(out, "ban-list", data_set, prefix, resources, show_opts, rc == pcmk_rc_ok)); } /* Print stonith history */ if (pcmk_any_flags_set(section_opts, pcmk_section_fencing_all) && fence_history != pcmk__fence_history_none) { if (history_rc != 0) { if (!already_printed_failure) { PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "fencing-list", hp, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { CHECK_RC(rc, out->message(out, "pending-fencing-list", hp, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } } return rc; } PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") static int cluster_status_xml(pcmk__output_t *out, va_list args) { pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts, 
show_opts); /*** NODES ***/ if (pcmk_is_set(section_opts, pcmk_section_nodes)) { out->message(out, "node-list", data_set->nodes, unames, resources, show_opts, false); } /* Print resources section, if needed */ if (pcmk_is_set(section_opts, pcmk_section_resources)) { /* XML output always displays full details. */ uint32_t full_show_opts = show_opts & ~pcmk_show_brief; out->message(out, "resource-list", data_set, full_show_opts, false, unames, resources, false); } /* print Node Attributes section if requested */ if (pcmk_is_set(section_opts, pcmk_section_attributes)) { out->message(out, "node-attribute-list", data_set, show_opts, false, unames, resources); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) { out->message(out, "node-summary", data_set, unames, resources, section_opts, show_opts, false); } /* If there were any failed actions, print them */ if (pcmk_is_set(section_opts, pcmk_section_failures) && xml_has_children(data_set->failed)) { out->message(out, "failed-action-list", data_set, unames, resources, show_opts, false); } /* Print stonith history */ if (pcmk_is_set(section_opts, pcmk_section_fencing_all) && fence_history != pcmk__fence_history_none) { out->message(out, "full-fencing-list", history_rc, stonith_history, unames, section_opts, show_opts, false); } /* Print tickets if requested */ if (pcmk_is_set(section_opts, pcmk_section_tickets)) { out->message(out, "ticket-list", data_set, false); } /* Print negative location constraints if requested */ if (pcmk_is_set(section_opts, pcmk_section_bans)) { out->message(out, "ban-list", data_set, prefix, resources, show_opts, false); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") static int cluster_status_html(pcmk__output_t *out, va_list args) { pe_working_set_t *data_set = va_arg(args, pe_working_set_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); bool already_printed_failure = false; out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts, show_opts); /*** NODE LIST ***/ if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) { out->message(out, "node-list", data_set->nodes, unames, resources, show_opts, false); } /* Print resources section, if needed */ if (pcmk_is_set(section_opts, pcmk_section_resources)) { out->message(out, "resource-list", data_set, show_opts, true, unames, resources, false); } /* print Node Attributes section if requested */ if (pcmk_is_set(section_opts, pcmk_section_attributes)) { out->message(out, "node-attribute-list", data_set, show_opts, false, unames, resources); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) { out->message(out, "node-summary", data_set, unames, resources, 
section_opts, show_opts, false); } /* If there were any failed actions, print them */ if (pcmk_is_set(section_opts, pcmk_section_failures) && xml_has_children(data_set->failed)) { out->message(out, "failed-action-list", data_set, unames, resources, show_opts, false); } /* Print failed stonith actions */ if (pcmk_is_set(section_opts, pcmk_section_fence_failed) && fence_history != pcmk__fence_history_none) { if (history_rc == 0) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "failed-fencing-list", stonith_history, unames, section_opts, show_opts, false); } } else { out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } /* Print stonith history */ if (pcmk_any_flags_set(section_opts, pcmk_section_fencing_all) && fence_history != pcmk__fence_history_none) { if (history_rc != 0) { if (!already_printed_failure) { out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "fencing-list", hp, unames, section_opts, show_opts, false); } } else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) { stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { out->message(out, "pending-fencing-list", hp, unames, section_opts, show_opts, false); } } } /* Print tickets if requested */ if (pcmk_is_set(section_opts, pcmk_section_tickets)) { out->message(out, "ticket-list", data_set, false); } /* Print negative location constraints if requested */ if (pcmk_is_set(section_opts, pcmk_section_bans)) { out->message(out, "ban-list", data_set, prefix, resources, show_opts, false); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute", "const char *", "const char *", "const char *", "const char *", "const char *") static int attribute_default(pcmk__output_t *out, va_list args) { const char *scope = va_arg(args, const char *); const char *instance = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); const char *host = va_arg(args, const char *); GString *s = g_string_sized_new(50); if (!pcmk__str_empty(scope)) { pcmk__g_strcat(s, "scope=\"", scope, "\" ", NULL); } if (!pcmk__str_empty(instance)) { pcmk__g_strcat(s, "id=\"", instance, "\" ", NULL); } pcmk__g_strcat(s, "name=\"", pcmk__s(name, ""), "\" ", NULL); if (!pcmk__str_empty(host)) { pcmk__g_strcat(s, "host=\"", host, "\" ", NULL); } pcmk__g_strcat(s, "value=\"", pcmk__s(value, ""), "\"", NULL); out->info(out, "%s", s->str); g_string_free(s, TRUE); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute", "const char *", "const char *", "const char *", "const char *", "const char *") static int attribute_xml(pcmk__output_t *out, va_list args) { const char *scope = va_arg(args, const char *); const char *instance = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); const char *host = va_arg(args, const char *); xmlNodePtr node = NULL; node = pcmk__output_create_xml_node(out, "attribute", "name", name, "value", 
value ? value : "", NULL); if (!pcmk__str_empty(scope)) { crm_xml_add(node, "scope", scope); } if (!pcmk__str_empty(instance)) { crm_xml_add(node, "id", instance); } if (!pcmk__str_empty(host)) { crm_xml_add(node, "host", host); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rule-check", "const char *", "int", "const char *") static int rule_check_default(pcmk__output_t *out, va_list args) { const char *rule_id = va_arg(args, const char *); int result = va_arg(args, int); const char *error = va_arg(args, const char *); switch (result) { case pcmk_rc_within_range: return out->info(out, "Rule %s is still in effect", rule_id); case pcmk_rc_ok: return out->info(out, "Rule %s satisfies conditions", rule_id); case pcmk_rc_after_range: return out->info(out, "Rule %s is expired", rule_id); case pcmk_rc_before_range: return out->info(out, "Rule %s has not yet taken effect", rule_id); case pcmk_rc_op_unsatisfied: return out->info(out, "Rule %s does not satisfy conditions", rule_id); default: out->err(out, "Could not determine whether rule %s is in effect: %s", rule_id, ((error != NULL)? error : "unexpected error")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("rule-check", "const char *", "int", "const char *") static int rule_check_xml(pcmk__output_t *out, va_list args) { const char *rule_id = va_arg(args, const char *); int result = va_arg(args, int); const char *error = va_arg(args, const char *); char *rc_str = pcmk__itoa(pcmk_rc2exitc(result)); pcmk__output_create_xml_node(out, "rule-check", "rule-id", rule_id, "rc", rc_str, NULL); free(rc_str); switch (result) { case pcmk_rc_within_range: case pcmk_rc_ok: case pcmk_rc_after_range: case pcmk_rc_before_range: case pcmk_rc_op_unsatisfied: return pcmk_rc_ok; default: out->err(out, "Could not determine whether rule %s is in effect: %s", rule_id, ((error != NULL)? error : "unexpected error")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_none(pcmk__output_t *out, va_list args) { return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_text(pcmk__output_t *out, va_list args) { int code = va_arg(args, int); const char *name = va_arg(args, const char *); const char *desc = va_arg(args, const char *); static int code_width = 0; if (out->is_quiet(out)) { /* If out->is_quiet(), don't print the code. Print name and/or desc in a * compact format for text output, or print nothing at all for none-type * output. */ if ((name != NULL) && (desc != NULL)) { pcmk__formatted_printf(out, "%s - %s\n", name, desc); } else if ((name != NULL) || (desc != NULL)) { pcmk__formatted_printf(out, "%s\n", ((name != NULL)? name : desc)); } return pcmk_rc_ok; } /* Get length of longest (most negative) standard Pacemaker return code * This should be longer than all the values of any other type of return * code. */ if (code_width == 0) { long long most_negative = pcmk_rc_error - (long long) pcmk__n_rc + 1; code_width = (int) snprintf(NULL, 0, "%lld", most_negative); } if ((name != NULL) && (desc != NULL)) { static int name_width = 0; if (name_width == 0) { // Get length of longest standard Pacemaker return code name for (int lpc = 0; lpc < pcmk__n_rc; lpc++) { int len = (int) strlen(pcmk_rc_name(pcmk_rc_error - lpc)); name_width = QB_MAX(name_width, len); } } return out->info(out, "% *d: %-*s %s", code_width, code, name_width, name, desc); } if ((name != NULL) || (desc != NULL)) { return out->info(out, "% *d: %s", code_width, code, ((name != NULL)? 
name : desc)); } return out->info(out, "% *d", code_width, code); } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_xml(pcmk__output_t *out, va_list args) { int code = va_arg(args, int); const char *name = va_arg(args, const char *); const char *desc = va_arg(args, const char *); char *code_str = pcmk__itoa(code); pcmk__output_create_xml_node(out, "result-code", "code", code_str, XML_ATTR_NAME, name, XML_ATTR_DESC, desc, NULL); free(code_str); return pcmk_rc_ok; } static pcmk__message_entry_t fmt_functions[] = { { "attribute", "default", attribute_default }, { "attribute", "xml", attribute_xml }, { "cluster-status", "default", pcmk__cluster_status_text }, { "cluster-status", "html", cluster_status_html }, { "cluster-status", "xml", cluster_status_xml }, { "crmadmin-node", "default", crmadmin_node }, { "crmadmin-node", "text", crmadmin_node_text }, { "crmadmin-node", "xml", crmadmin_node_xml }, { "dc", "default", dc }, { "dc", "text", dc_text }, { "dc", "xml", dc_xml }, { "digests", "default", digests_text }, { "digests", "xml", digests_xml }, { "health", "default", health }, { "health", "text", health_text }, { "health", "xml", health_xml }, { "inject-attr", "default", inject_attr }, { "inject-attr", "xml", inject_attr_xml }, { "inject-cluster-action", "default", inject_cluster_action }, { "inject-cluster-action", "xml", inject_cluster_action_xml }, { "inject-fencing-action", "default", inject_fencing_action }, { "inject-fencing-action", "xml", inject_fencing_action_xml }, { "inject-modify-config", "default", inject_modify_config }, { "inject-modify-config", "xml", inject_modify_config_xml }, { "inject-modify-node", "default", inject_modify_node }, { "inject-modify-node", "xml", inject_modify_node_xml }, { "inject-modify-ticket", "default", inject_modify_ticket }, { "inject-modify-ticket", "xml", inject_modify_ticket_xml }, { "inject-pseudo-action", "default", inject_pseudo_action }, { "inject-pseudo-action", "xml", inject_pseudo_action_xml }, { "inject-rsc-action", "default", inject_rsc_action }, { "inject-rsc-action", "xml", inject_rsc_action_xml }, { "inject-spec", "default", inject_spec }, { "inject-spec", "xml", inject_spec_xml }, { "locations-list", "default", locations_list }, { "locations-list", "xml", locations_list_xml }, { "node-action", "default", node_action }, { "node-action", "xml", node_action_xml }, { "node-info", "default", node_info_default }, { "node-info", "xml", node_info_xml }, { "pacemakerd-health", "default", pacemakerd_health }, { "pacemakerd-health", "html", pacemakerd_health_html }, { "pacemakerd-health", "text", pacemakerd_health_text }, { "pacemakerd-health", "xml", pacemakerd_health_xml }, { "profile", "default", profile_default, }, { "profile", "xml", profile_xml }, { "result-code", "none", result_code_none }, { "result-code", "text", result_code_text }, { "result-code", "xml", result_code_xml }, { "rsc-action", "default", rsc_action_default }, { "rsc-action-item", "default", rsc_action_item }, { "rsc-action-item", "xml", rsc_action_item_xml }, { "rsc-is-colocated-with-list", "default", rsc_is_colocated_with_list }, { "rsc-is-colocated-with-list", "xml", rsc_is_colocated_with_list_xml }, { "rscs-colocated-with-list", "default", rscs_colocated_with_list }, { "rscs-colocated-with-list", "xml", rscs_colocated_with_list_xml }, { "rule-check", "default", rule_check_default }, { "rule-check", "xml", rule_check_xml }, { "locations-and-colocations", "default", locations_and_colocations }, { "locations-and-colocations", 
"xml", locations_and_colocations_xml }, { NULL, NULL, NULL } }; void pcmk__register_lib_messages(pcmk__output_t *out) { pcmk__register_messages(out, fmt_functions); } diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c index fc426f2e18..8ec75a7407 100644 --- a/lib/pacemaker/pcmk_sched_allocate.c +++ b/lib/pacemaker/pcmk_sched_allocate.c @@ -1,805 +1,811 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include "libpacemaker_private.h" CRM_TRACE_INIT_DATA(pacemaker); /*! * \internal * \brief Do deferred action checks after allocation * * When unpacking the resource history, the scheduler checks for resource * configurations that have changed since an action was run. However, at that * time, bundles using the REMOTE_CONTAINER_HACK don't have their final * parameter information, so instead they add a deferred check to a list. This * function processes one entry in that list. * * \param[in,out] rsc Resource that action history is for * \param[in,out] node Node that action history is for * \param[in] rsc_op Action history entry * \param[in] check Type of deferred check to do */ static void check_params(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_op, enum pe_check_parameters check) { const char *reason = NULL; op_digest_cache_t *digest_data = NULL; switch (check) { case pe_check_active: if (pcmk__check_action_config(rsc, node, rsc_op) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL)) { reason = "action definition changed"; } break; case pe_check_last_failure: digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, rsc->cluster); switch (digest_data->rc) { case RSC_DIGEST_UNKNOWN: crm_trace("Resource %s history entry %s on %s has " "no digest to compare", rsc->id, ID(rsc_op), node->details->id); break; case RSC_DIGEST_MATCH: break; default: reason = "resource parameters have changed"; break; } break; } if (reason != NULL) { pe__clear_failcount(rsc, node, reason, rsc->cluster); } } /*! * \internal * \brief Check whether a resource has failcount clearing scheduled on a node * * \param[in] node Node to check * \param[in] rsc Resource to check * * \return true if \p rsc has failcount clearing scheduled on \p node, * otherwise false */ static bool -failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc) +failcount_clear_action_exists(const pe_node_t *node, const pe_resource_t *rsc) { GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE); if (list != NULL) { g_list_free(list); return true; } return false; } /*! 
* \internal * \brief Ban a resource from a node if it reached its failure threshold there * - * \param[in] rsc Resource to check failure threshold for - * \param[in] node Node to check \p rsc on + * \param[in,out] rsc Resource to check failure threshold for + * \param[in] node Node to check \p rsc on */ static void -check_failure_threshold(pe_resource_t *rsc, pe_node_t *node) +check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node) { // If this is a collective resource, apply recursively to children instead if (rsc->children != NULL) { g_list_foreach(rsc->children, (GFunc) check_failure_threshold, - node); + (gpointer) node); return; } else if (failcount_clear_action_exists(node, rsc)) { /* Don't force the resource away from this node due to a failcount * that's going to be cleared. * * @TODO Failcount clearing can be scheduled in * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in * schedule_resource_actions() via check_params(). This runs well before * then, so it cannot detect those, meaning we might check the migration * threshold when we shouldn't. Worst case, we stop or move the * resource, then move it back in the next transition. */ return; } else { pe_resource_t *failed = NULL; if (pcmk__threshold_reached(rsc, node, &failed)) { resource_location(failed, node, -INFINITY, "__fail_limit__", rsc->cluster); } } } /*! * \internal * \brief If resource has exclusive discovery, ban node if not allowed * * Location constraints have a resource-discovery option that allows users to * specify where probes are done for the affected resource. If this is set to * exclusive, probes will only be done on nodes listed in exclusive constraints. * This function bans the resource from the node if the node is not listed. * - * \param[in] rsc Resource to check - * \param[in] node Node to check \p rsc on + * \param[in,out] rsc Resource to check + * \param[in] node Node to check \p rsc on */ static void -apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node) +apply_exclusive_discovery(pe_resource_t *rsc, const pe_node_t *node) { if (rsc->exclusive_discover || pe__const_top_resource(rsc, false)->exclusive_discover) { pe_node_t *match = NULL; // If this is a collective resource, apply recursively to children - g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node); + g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, + (gpointer) node); match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if ((match != NULL) && (match->rsc_discover_mode != pe_discover_exclusive)) { match->weight = -INFINITY; } } } /*! * \internal * \brief Apply stickiness to a resource if appropriate * - * \param[in] rsc Resource to check for stickiness - * \param[in] data_set Cluster working set + * \param[in,out] rsc Resource to check for stickiness + * \param[in,out] data_set Cluster working set */ static void apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set) { pe_node_t *node = NULL; // If this is a collective resource, apply recursively to children instead if (rsc->children != NULL) { g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set); return; } /* A resource is sticky if it is managed, has stickiness configured, and is * active on a single node. */ if (!pcmk_is_set(rsc->flags, pe_rsc_managed) || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) { return; } node = rsc->running_on->data; /* In a symmetric cluster, stickiness can always be used. 
In an * asymmetric cluster, we have to check whether the resource is still * allowed on the node, so we don't keep the resource somewhere it is no * longer explicitly enabled. */ if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster) && (pe_hash_table_lookup(rsc->allowed_nodes, node->details->id) == NULL)) { pe_rsc_debug(rsc, "Ignoring %s stickiness because the cluster is " "asymmetric and %s is not explicitly allowed", rsc->id, pe__node_name(node)); return; } pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s", rsc->id, rsc->stickiness, pe__node_name(node)); - resource_location(rsc, node, rsc->stickiness, "stickiness", - rsc->cluster); + resource_location(rsc, node, rsc->stickiness, "stickiness", data_set); } /*! * \internal * \brief Apply shutdown locks for all resources as appropriate * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ static void apply_shutdown_locks(pe_working_set_t *data_set) { if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) { return; } for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; rsc->cmds->shutdown_lock(rsc); } } /*! * \internal * \brief Calculate the number of available nodes in the cluster * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ static void count_available_nodes(pe_working_set_t *data_set) { if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) { return; } // @COMPAT for API backward compatibility only (cluster does not use value) for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; if ((node != NULL) && (node->weight >= 0) && node->details->online && (node->details->type != node_ping)) { data_set->max_valid_nodes++; } } crm_trace("Online node count: %d", data_set->max_valid_nodes); } /*! * \internal * \brief Apply node-specific scheduling criteria * * After the CIB has been unpacked, process node-specific scheduling criteria * including shutdown locks, location constraints, resource stickiness, * migration thresholds, and exclusive resource discovery. * * \param[in,out] data_set Cluster working set */ static void apply_node_criteria(pe_working_set_t *data_set) { crm_trace("Applying node-specific scheduling criteria"); apply_shutdown_locks(data_set); count_available_nodes(data_set); pcmk__apply_locations(data_set); g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set); for (GList *node_iter = data_set->nodes; node_iter != NULL; node_iter = node_iter->next) { for (GList *rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) { pe_node_t *node = (pe_node_t *) node_iter->data; pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data; check_failure_threshold(rsc, node); apply_exclusive_discovery(rsc, node); } } } /*! * \internal * \brief Allocate resources to nodes * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ static void allocate_resources(pe_working_set_t *data_set) { GList *iter = NULL; crm_trace("Allocating resources to nodes"); if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) { pcmk__sort_resources(data_set); } pcmk__show_node_capacities("Original", data_set); if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) { /* Allocate remote connection resources first (which will also allocate * any colocation dependencies). If the connection is migrating, always * prefer the partial migration target. 
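* Connections are assigned first so that the availability of their * remote nodes is known when the other resources are placed.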
*/ for (iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (rsc->is_remote_node) { pe_rsc_trace(rsc, "Allocating remote connection resource '%s'", rsc->id); rsc->cmds->assign(rsc, rsc->partial_migration_target); } } } /* now do the rest of the resources */ for (iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (!rsc->is_remote_node) { pe_rsc_trace(rsc, "Allocating %s resource '%s'", crm_element_name(rsc->xml), rsc->id); rsc->cmds->assign(rsc, NULL); } } pcmk__show_node_capacities("Remaining", data_set); } /*! * \internal * \brief Schedule fail count clearing on online nodes if resource is orphaned * - * \param[in] rsc Resource to check - * \param[in] data_set Cluster working set + * \param[in,out] rsc Resource to check + * \param[in,out] data_set Cluster working set */ static void clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set) { if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) { return; } crm_trace("Clear fail counts for orphaned resource %s", rsc->id); /* There's no need to recurse into rsc->children because those * should just be unallocated clone instances. */ for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; pe_action_t *clear_op = NULL; if (!node->details->online) { continue; } if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL) == 0) { continue; } clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set); /* We can't use order_action_then_stop() here because its * pe_order_preserve breaks things */ pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc), NULL, pe_order_optional, data_set); } } /*! * \internal * \brief Schedule any resource actions needed * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ static void schedule_resource_actions(pe_working_set_t *data_set) { // Process deferred action checks pe__foreach_param_check(data_set, check_params); pe__free_param_checks(data_set); if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) { crm_trace("Scheduling probes"); pcmk__schedule_probes(data_set); } if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) { g_list_foreach(data_set->resources, (GFunc) clear_failcounts_if_orphaned, data_set); } crm_trace("Scheduling resource actions"); for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; rsc->cmds->create_actions(rsc); } } /*! * \internal * \brief Check whether a resource or any of its descendants are managed * * \param[in] rsc Resource to check * * \return true if resource or any descendent is managed, otherwise false */ static bool is_managed(const pe_resource_t *rsc) { if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { return true; } for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { if (is_managed((pe_resource_t *) iter->data)) { return true; } } return false; } /*! 
* \internal * \brief Check whether any resources in the cluster are managed * * \param[in] data_set Cluster working set * * \return true if any resource is managed, otherwise false */ static bool -any_managed_resources(pe_working_set_t *data_set) +any_managed_resources(const pe_working_set_t *data_set) { - for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { - if (is_managed((pe_resource_t *) iter->data)) { + for (const GList *iter = data_set->resources; + iter != NULL; iter = iter->next) { + if (is_managed((const pe_resource_t *) iter->data)) { return true; } } return false; } /*! * \internal * \brief Check whether a node requires fencing * * \param[in] node Node to check * \param[in] have_managed Whether any resource in cluster is managed * \param[in] data_set Cluster working set * * \return true if \p node should be fenced, otherwise false */ static bool -needs_fencing(pe_node_t *node, bool have_managed, pe_working_set_t *data_set) +needs_fencing(const pe_node_t *node, bool have_managed, + const pe_working_set_t *data_set) { return have_managed && node->details->unclean && pe_can_fence(data_set, node); } /*! * \internal * \brief Check whether a node requires shutdown * * \param[in] node Node to check * * \return true if \p node should be shut down, otherwise false */ static bool -needs_shutdown(pe_node_t *node) +needs_shutdown(const pe_node_t *node) { if (pe__is_guest_or_remote_node(node)) { /* Do not send shutdown actions for Pacemaker Remote nodes. * @TODO We might come up with a good use for this in the future. */ return false; } return node->details->online && node->details->shutdown; } /*! * \internal * \brief Track and order non-DC fencing * - * \param[in] list List of existing non-DC fencing actions - * \param[in] action Fencing action to prepend to \p list + * \param[in,out] list List of existing non-DC fencing actions + * \param[in,out] action Fencing action to prepend to \p list + * \param[in] data_set Cluster working set * * \return (Possibly new) head of \p list */ static GList * -add_nondc_fencing(GList *list, pe_action_t *action, pe_working_set_t *data_set) +add_nondc_fencing(GList *list, pe_action_t *action, + const pe_working_set_t *data_set) { if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing) && (list != NULL)) { /* Concurrent fencing is disabled, so order each non-DC * fencing in a chain. If there is any DC fencing or * shutdown, it will be ordered after the last action in the * chain later. */ order_actions((pe_action_t *) list->data, action, pe_order_optional); } return g_list_prepend(list, action); } /*! * \internal * \brief Schedule a node for fencing * - * \param[in] node Node that requires fencing - * \param[in] data_set Cluster working set + * \param[in,out] node Node that requires fencing + * \param[in,out] data_set Cluster working set */ static pe_action_t * schedule_fencing(pe_node_t *node, pe_working_set_t *data_set) { pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set); pe_warn("Scheduling node %s for fencing", pe__node_name(node)); pcmk__order_vs_fence(fencing, data_set); return fencing; } /*! 
* \internal * \brief Create and order node fencing and shutdown actions * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ static void schedule_fencing_and_shutdowns(pe_working_set_t *data_set) { pe_action_t *dc_down = NULL; bool integrity_lost = false; bool have_managed = any_managed_resources(data_set); GList *fencing_ops = NULL; GList *shutdown_ops = NULL; crm_trace("Scheduling fencing and shutdowns as needed"); if (!have_managed) { crm_notice("No fencing will be done until there are resources to manage"); } // Check each node for whether it needs fencing or shutdown for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; pe_action_t *fencing = NULL; /* Guest nodes are "fenced" by recovering their container resource, * so handle them separately. */ if (pe__is_guest_node(node)) { if (node->details->remote_requires_reset && have_managed && pe_can_fence(data_set, node)) { pcmk__fence_guest(node); } continue; } if (needs_fencing(node, have_managed, data_set)) { fencing = schedule_fencing(node, data_set); // Track DC and non-DC fence actions separately if (node->details->is_dc) { dc_down = fencing; } else { fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set); } } else if (needs_shutdown(node)) { pe_action_t *down_op = pcmk__new_shutdown_action(node); // Track DC and non-DC shutdown actions separately if (node->details->is_dc) { dc_down = down_op; } else { shutdown_ops = g_list_prepend(shutdown_ops, down_op); } } if ((fencing == NULL) && node->details->unclean) { integrity_lost = true; pe_warn("Node %s is unclean but cannot be fenced", pe__node_name(node)); } } if (integrity_lost) { if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { pe_warn("Resource functionality and data integrity cannot be " "guaranteed (configure, enable, and test fencing to " "correct this)"); } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) { crm_notice("Unclean nodes will not be fenced until quorum is " "attained or no-quorum-policy is set to ignore"); } } if (dc_down != NULL) { /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated * DC elections. However, we don't want to order non-DC shutdowns before * a DC *fencing*, because even though we don't want a node that's * shutting down to become DC, the DC fencing could be ordered before a * clone stop that's also ordered before the shutdowns, thus leading to * a graph loop. */ if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) { pcmk__order_after_each(dc_down, shutdown_ops); } // Order any non-DC fencing before any DC fencing or shutdown if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) { /* With concurrent fencing, order each non-DC fencing action * separately before any DC fencing or shutdown. */ pcmk__order_after_each(dc_down, fencing_ops); } else if (fencing_ops != NULL) { /* Without concurrent fencing, the non-DC fencing actions are * already ordered relative to each other, so we just need to order * the DC fencing after the last action in the chain (which is the * first item in the list). */ order_actions((pe_action_t *) fencing_ops->data, dc_down, pe_order_optional); } } g_list_free(fencing_ops); g_list_free(shutdown_ops); } static void log_resource_details(pe_working_set_t *data_set) { pcmk__output_t *out = data_set->priv; GList *all = NULL; /* We need a list of nodes that we are allowed to output information for. 
* This is necessary because out->message for all the resource-related * messages expects such a list, due to the `crm_mon --node=` feature. Here, * we just make it a list of all the nodes. */ all = g_list_prepend(all, (gpointer) "*"); for (GList *item = data_set->resources; item != NULL; item = item->next) { pe_resource_t *rsc = (pe_resource_t *) item->data; // Log all resources except inactive orphans if (!pcmk_is_set(rsc->flags, pe_rsc_orphan) || (rsc->role != RSC_ROLE_STOPPED)) { out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all); } } g_list_free(all); } static void log_all_actions(pe_working_set_t *data_set) { /* This only ever outputs to the log, so ignore whatever output object was * previously set and just log instead. */ pcmk__output_t *prev_out = data_set->priv; pcmk__output_t *out = NULL; if (pcmk__log_output_new(&out) != pcmk_rc_ok) { return; } pe__register_messages(out); pcmk__register_lib_messages(out); pcmk__output_set_log_level(out, LOG_NOTICE); data_set->priv = out; out->begin_list(out, NULL, NULL, "Actions"); pcmk__output_actions(data_set); out->end_list(out); out->finish(out, CRM_EX_OK, true, NULL); pcmk__output_free(out); data_set->priv = prev_out; } /*! * \internal * \brief Log all required but unrunnable actions at trace level * * \param[in] data_set Cluster working set */ static void -log_unrunnable_actions(pe_working_set_t *data_set) +log_unrunnable_actions(const pe_working_set_t *data_set) { const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo; crm_trace("Required but unrunnable actions:"); - for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) { - pe_action_t *action = (pe_action_t *) iter->data; + for (const GList *iter = data_set->actions; + iter != NULL; iter = iter->next) { + + const pe_action_t *action = (const pe_action_t *) iter->data; if (!pcmk_any_flags_set(action->flags, flags)) { pcmk__log_action("\t", action, true); } } } /*! * \internal * \brief Unpack the CIB for scheduling * - * \param[in] cib CIB XML to unpack (may be NULL if previously unpacked) - * \param[in] flags Working set flags to set in addition to defaults - * \param[in] data_set Cluster working set + * \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked) + * \param[in] flags Working set flags to set in addition to defaults + * \param[in,out] data_set Cluster working set */ static void unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set) { const char* localhost_save = NULL; if (pcmk_is_set(data_set->flags, pe_flag_have_status)) { crm_trace("Reusing previously calculated cluster status"); pe__set_working_set_flags(data_set, flags); return; } if (data_set->localhost) { localhost_save = data_set->localhost; } CRM_ASSERT(cib != NULL); crm_trace("Calculating cluster status"); /* This will zero the entire struct without freeing anything first, so * callers should never call pcmk__schedule_actions() with a populated data * set unless pe_flag_have_status is set (i.e. cluster_status() was * previously called, whether directly or via pcmk__schedule_actions()). */ set_working_set_defaults(data_set); if (localhost_save) { data_set->localhost = localhost_save; } pe__set_working_set_flags(data_set, flags); data_set->input = cib; cluster_status(data_set); // Sets pe_flag_have_status } /*! 
* \internal * \brief Run the scheduler for a given CIB * - * \param[in] cib CIB XML to use as scheduler input + * \param[in,out] cib CIB XML to use as scheduler input * \param[in] flags Working set flags to set in addition to defaults * \param[in,out] data_set Cluster working set */ void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set) { unpack_cib(cib, flags, data_set); pcmk__set_allocation_methods(data_set); pcmk__apply_node_health(data_set); pcmk__unpack_constraints(data_set); if (pcmk_is_set(data_set->flags, pe_flag_check_config)) { return; } if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) && pcmk__is_daemon) { log_resource_details(data_set); } apply_node_criteria(data_set); if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) { return; } pcmk__create_internal_constraints(data_set); pcmk__handle_rsc_config_changes(data_set); allocate_resources(data_set); schedule_resource_actions(data_set); /* Remote ordering constraints need to happen prior to calculating fencing * because it is one more place we can mark nodes as needing fencing. */ pcmk__order_remote_connection_actions(data_set); schedule_fencing_and_shutdowns(data_set); pcmk__apply_orderings(data_set); log_all_actions(data_set); pcmk__create_graph(data_set); if (get_crm_log_level() == LOG_TRACE) { log_unrunnable_actions(data_set); } } diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c index c21aef92f2..e5b9be6a78 100644 --- a/lib/pacemaker/pcmk_sched_bundle.c +++ b/lib/pacemaker/pcmk_sched_bundle.c @@ -1,1156 +1,1156 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include "libpacemaker_private.h" #define PE__VARIANT_BUNDLE 1 #include static bool is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node) { for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (node->details == replica->node->details) { return TRUE; } } return FALSE; } void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes, int max, int per_host_max, pe_working_set_t * data_set); static GList * get_container_list(const pe_resource_t *rsc) { GList *containers = NULL; if (rsc->variant == pe_container) { pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, rsc); for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; containers = g_list_append(containers, replica->container); } } return containers; } static inline GList * get_containers_or_children(const pe_resource_t *rsc) { return (rsc->variant == pe_container)? get_container_list(rsc) : rsc->children; } /*! 
* \internal * \brief Assign a bundle resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * * \return Node that \p rsc is assigned to, if assigned entirely to one node */ pe_node_t * pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer) { GList *containers = NULL; GList *nodes = NULL; pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return NULL); get_bundle_variant_data(bundle_data, rsc); pe__set_resource_flags(rsc, pe_rsc_allocating); containers = get_container_list(rsc); pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = pcmk__sort_nodes(nodes, NULL); containers = g_list_sort(containers, pcmk__cmp_instance); distribute_children(rsc, containers, nodes, bundle_data->nreplicas, bundle_data->nreplicas_per_host, rsc->cluster); g_list_free(nodes); g_list_free(containers); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; pe_node_t *container_host = NULL; CRM_ASSERT(replica); if (replica->ip) { pe_rsc_trace(rsc, "Allocating bundle %s IP %s", rsc->id, replica->ip->id); replica->ip->cmds->assign(replica->ip, prefer); } container_host = replica->container->allocated_to; if (replica->remote && pe__is_guest_or_remote_node(container_host)) { /* We need 'nested' connection resources to be on the same * host because pacemaker-remoted only supports a single * active connection */ pcmk__new_colocation("child-remote-with-docker-remote", NULL, INFINITY, replica->remote, container_host->details->remote_rsc, NULL, NULL, true, rsc->cluster); } if (replica->remote) { pe_rsc_trace(rsc, "Allocating bundle %s connection %s", rsc->id, replica->remote->id); replica->remote->cmds->assign(replica->remote, prefer); } // Explicitly allocate replicas' children before bundle child if (replica->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, replica->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if (node->details != replica->node->details) { node->weight = -INFINITY; } else if (!pcmk__threshold_reached(replica->child, node, NULL)) { node->weight = INFINITY; } } pe__set_resource_flags(replica->child->parent, pe_rsc_allocating); pe_rsc_trace(rsc, "Allocating bundle %s replica child %s", rsc->id, replica->child->id); replica->child->cmds->assign(replica->child, replica->node); pe__clear_resource_flags(replica->child->parent, pe_rsc_allocating); } } if (bundle_data->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if (is_bundle_node(bundle_data, node)) { node->weight = 0; } else { node->weight = -INFINITY; } } pe_rsc_trace(rsc, "Allocating bundle %s child %s", rsc->id, bundle_data->child->id); bundle_data->child->cmds->assign(bundle_data->child, prefer); } pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); return NULL; } void pcmk__bundle_create_actions(pe_resource_t *rsc) { pe_action_t *action = NULL; GList *containers = NULL; pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); containers = get_container_list(rsc); get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { 
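        // Delegate action creation to each replica's implicit resources
        // (IP address, container, and remote connection, where present)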
pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (replica->ip) { replica->ip->cmds->create_actions(replica->ip); } if (replica->container) { replica->container->cmds->create_actions(replica->container); } if (replica->remote) { replica->remote->cmds->create_actions(replica->remote); } } clone_create_pseudo_actions(rsc, containers, NULL, NULL); if (bundle_data->child) { bundle_data->child->cmds->create_actions(bundle_data->child); if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) { /* promote */ pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true); action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true); action->priority = INFINITY; /* demote */ pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true); action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true); action->priority = INFINITY; } } g_list_free(containers); } void pcmk__bundle_internal_constraints(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); if (bundle_data->child) { pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child, RSC_START, pe_order_implies_first_printed); pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child, RSC_STOP, pe_order_implies_first_printed); if (bundle_data->child->children) { pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc, RSC_STARTED, pe_order_implies_then_printed); pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc, RSC_STOPPED, pe_order_implies_then_printed); } else { pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed); pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed); } } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); CRM_ASSERT(replica->container); replica->container->cmds->internal_constraints(replica->container); pcmk__order_starts(rsc, replica->container, pe_order_runnable_left|pe_order_implies_first_printed); if (replica->child) { pcmk__order_stops(rsc, replica->child, pe_order_implies_first_printed); } pcmk__order_stops(rsc, replica->container, pe_order_implies_first_printed); pcmk__order_resource_actions(replica->container, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed); pcmk__order_resource_actions(replica->container, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed); if (replica->ip) { replica->ip->cmds->internal_constraints(replica->ip); // Start IP then container pcmk__order_starts(replica->ip, replica->container, pe_order_runnable_left|pe_order_preserve); pcmk__order_stops(replica->container, replica->ip, pe_order_implies_first|pe_order_preserve); pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip, replica->container, NULL, NULL, true, rsc->cluster); } if (replica->remote) { /* This handles ordering and colocating remote relative to container * (via "resource-with-container"). Since IP is also ordered and * colocated relative to the container, we don't need to do anything * explicit here with IP. 
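 *
 * Illustrative sketch only (not code that exists at this point): if the
 * remote's colocation with its container were written out here, it would
 * mirror the "ip-with-docker" call above, roughly:
 *
 *     pcmk__new_colocation("resource-with-container", NULL, INFINITY,
 *                          replica->remote, replica->container,
 *                          NULL, NULL, true, rsc->cluster);
 *
 * The scheduler's remote logic already creates the equivalent constraint,
 * so no explicit call is needed here.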
*/ replica->remote->cmds->internal_constraints(replica->remote); } if (replica->child) { CRM_ASSERT(replica->remote); // "Start remote then child" is implicit in scheduler's remote logic } } if (bundle_data->child) { bundle_data->child->cmds->internal_constraints(bundle_data->child); if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) { pcmk__promotable_restart_ordering(rsc); /* child demoted before global demoted */ pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED, pe_order_implies_then_printed); /* global demote before child demote */ pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child, RSC_DEMOTE, pe_order_implies_first_printed); /* child promoted before global promoted */ pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED, pe_order_implies_then_printed); /* global promote before child promote */ pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child, RSC_PROMOTE, pe_order_implies_first_printed); } } } static pe_resource_t * compatible_replica_for_node(const pe_resource_t *rsc_lh, const pe_node_t *candidate, const pe_resource_t *rsc, enum rsc_role_e filter, gboolean current) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(candidate != NULL, return NULL); get_bundle_variant_data(bundle_data, rsc); crm_trace("Looking for compatible child from %s for %s on %s", rsc_lh->id, rsc->id, pe__node_name(candidate)); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (is_child_compatible(replica->container, candidate, filter, current)) { crm_trace("Pairing %s with %s on %s", rsc_lh->id, replica->container->id, pe__node_name(candidate)); return replica->container; } } crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id); return NULL; } static pe_resource_t * compatible_replica(const pe_resource_t *rsc_lh, const pe_resource_t *rsc, enum rsc_role_e filter, gboolean current, pe_working_set_t *data_set) { GList *scratch = NULL; pe_resource_t *pair = NULL; pe_node_t *active_node_lh = NULL; active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current); if (active_node_lh) { return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter, current); } scratch = g_hash_table_get_values(rsc_lh->allowed_nodes); scratch = pcmk__sort_nodes(scratch, NULL); for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none")); done: g_list_free(scratch); return pair; } int copies_per_node(pe_resource_t * rsc) { /* Strictly speaking, there should be a 'copies_per_node' addition * to the resource function table and each case would be a * function. However that would be serious overkill to return an * int. In fact, it seems to me that both function tables * could/should be replaced by resources.{c,h} full of * rsc_{some_operation} functions containing a switch as below * which calls out to functions named {variant}_{some_operation} * as needed. 
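 *
 * A rough sketch of that idea (hypothetical names, not an existing API):
 *
 *     int rsc_copies_per_node(const pe_resource_t *rsc) {
 *         switch (rsc->variant) {
 *             case pe_clone:     return clone_copies_per_node(rsc);
 *             case pe_container: return bundle_copies_per_node(rsc);
 *             default:           return 1;
 *         }
 *     }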
*/ switch(rsc->variant) { case pe_unknown: return 0; case pe_native: case pe_group: return 1; case pe_clone: { const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); if (max_clones_node == NULL) { return 1; } else { int max_i; pcmk__scan_min_int(max_clones_node, &max_i, 0); return max_i; } } case pe_container: { pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, rsc); return data->nreplicas_per_host; } } return 0; } /*! * \internal * \brief Apply a colocation's score to node weights or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node weights (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { GList *allocated_primaries = NULL; pe__bundle_variant_data_t *bundle_data = NULL; /* This should never be called for the bundle itself as a dependent. * Instead, we add its colocation constraints to its replicas and call the * apply_coloc_score() for the replicas as dependents. */ CRM_ASSERT(!for_dependent); CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL), return); CRM_ASSERT(dependent->variant == pe_native); if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { pe_rsc_trace(primary, "%s is still provisional", primary->id); return; } else if (colocation->dependent->variant > pe_group) { pe_resource_t *primary_replica = compatible_replica(dependent, primary, RSC_ROLE_UNKNOWN, FALSE, dependent->cluster); if (primary_replica) { pe_rsc_debug(primary, "Pairing %s with %s", dependent->id, primary_replica->id); dependent->cmds->apply_coloc_score(dependent, primary_replica, colocation, true); } else if (colocation->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", dependent->id, primary->id); pcmk__assign_resource(dependent, NULL, true); } else { pe_rsc_debug(primary, "Cannot pair %s with instance of %s", dependent->id, primary->id); } return; } get_bundle_variant_data(bundle_data, primary); pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d", colocation->id, dependent->id, primary->id, colocation->score); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (colocation->score < INFINITY) { replica->container->cmds->apply_coloc_score(dependent, replica->container, colocation, false); } else { pe_node_t *chosen = replica->container->fns->location(replica->container, NULL, FALSE); if ((chosen == NULL) || is_set_recursive(replica->container, pe_rsc_block, TRUE)) { continue; } if ((colocation->primary_role >= RSC_ROLE_PROMOTED) && (replica->child == NULL)) { continue; } if ((colocation->primary_role >= RSC_ROLE_PROMOTED) && (replica->child->next_role < RSC_ROLE_PROMOTED)) { continue; } pe_rsc_trace(primary, "Allowing %s: %s %d", colocation->id, pe__node_name(chosen), chosen->weight); allocated_primaries = g_list_prepend(allocated_primaries, chosen); } } if (colocation->score >= INFINITY) { node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE); } g_list_free(allocated_primaries); } enum pe_action_flags 
pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node) { GList *containers = NULL; enum pe_action_flags flags = 0; pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, action->rsc); if(data->child) { enum action_tasks task = get_complex_task(data->child, action->task, TRUE); switch(task) { case no_action: case action_notify: case action_notified: case action_promote: case action_promoted: case action_demote: case action_demoted: return summary_action_flags(action, data->child->children, node); default: break; } } containers = get_container_list(action->rsc); flags = summary_action_flags(action, containers, node); g_list_free(containers); return flags; } pe_resource_t * find_compatible_child_by_node(const pe_resource_t *local_child, const pe_node_t *local_node, const pe_resource_t *rsc, enum rsc_role_e filter, gboolean current) { GList *gIter = NULL; GList *children = NULL; if (local_node == NULL) { crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id); return NULL; } crm_trace("Looking for compatible child from %s for %s on %s", local_child->id, rsc->id, pe__node_name(local_node)); children = get_containers_or_children(rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if(is_child_compatible(child_rsc, local_node, filter, current)) { crm_trace("Pairing %s with %s on %s", local_child->id, child_rsc->id, pe__node_name(local_node)); return child_rsc; } } crm_trace("Can't pair %s with %s", local_child->id, rsc->id); if(children != rsc->children) { g_list_free(children); } return NULL; } static pe__bundle_replica_t * replica_for_container(const pe_resource_t *rsc, const pe_resource_t *container, const pe_node_t *node) { if (rsc->variant == pe_container) { const pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, rsc); for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->child && (container == replica->container) && pe__same_node(node, replica->node)) { return replica; } } } return NULL; } static uint32_t multi_update_interleave_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t filter, uint32_t type, pe_working_set_t *data_set) { GList *gIter = NULL; GList *children = NULL; gboolean current = FALSE; uint32_t changed = pcmk__updated_none; /* Fix this - lazy */ if (pcmk__ends_with(first->uuid, "_stopped_0") || pcmk__ends_with(first->uuid, "_demoted_0")) { current = TRUE; } children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *then_child = gIter->data; pe_resource_t *first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current); if (first_child == NULL && current) { crm_trace("Ignore"); } else if (first_child == NULL) { crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid); /* Me no like this hack - but what else can we do? 
* * If there is no one active or about to be active * on the same node as then_child, then they must * not be allowed to start */ if (pcmk_any_flags_set(type, pe_order_runnable_left|pe_order_implies_then) /* Mandatory */ ) { pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id); if (pcmk__assign_resource(then_child, NULL, true)) { pcmk__set_updated_flags(changed, first, pcmk__updated_then); } } } else { pe_action_t *first_action = NULL; pe_action_t *then_action = NULL; enum action_tasks task = clone_child_action(first); const char *first_task = task2text(task); pe__bundle_replica_t *first_replica = NULL; pe__bundle_replica_t *then_replica = NULL; first_replica = replica_for_container(first->rsc, first_child, node); if (strstr(first->task, "stop") && first_replica && first_replica->child) { /* Except for 'stopped', we should be looking at the * in-container resource; actions for the child will * happen later and are therefore more likely to align * with the user's intent. */ first_action = find_first_action(first_replica->child->actions, NULL, task2text(task), node); } else { first_action = find_first_action(first_child->actions, NULL, task2text(task), node); } then_replica = replica_for_container(then->rsc, then_child, node); if (strstr(then->task, "mote") && then_replica && then_replica->child) { /* Promote/demote actions will never be found for the * container resource; look in the child instead * * Alternatively treat: * 'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and * 'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY' */ then_action = find_first_action(then_replica->child->actions, NULL, then->task, node); } else { then_action = find_first_action(then_child->actions, NULL, then->task, node); } if (first_action == NULL) { if (!pcmk_is_set(first_child->flags, pe_rsc_orphan) && !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) { crm_err("Internal error: No action found for %s in %s (first)", first_task, first_child->id); } else { crm_trace("No action found for %s in %s%s (first)", first_task, first_child->id, pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : ""); } continue; } /* We're only interested if 'then' is neither stopping nor being demoted */ if (then_action == NULL) { if (!pcmk_is_set(then_child->flags, pe_rsc_orphan) && !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) { crm_err("Internal error: No action found for %s in %s (then)", then->task, then_child->id); } else { crm_trace("No action found for %s in %s%s (then)", then->task, then_child->id, pcmk_is_set(then_child->flags, pe_rsc_orphan)?
" (ORPHAN)" : ""); } continue; } if (order_actions(first_action, then_action, type)) { crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x", first_action->uuid, pcmk_is_set(first_action->flags, pe_action_optional), then_action->uuid, pcmk_is_set(then_action->flags, pe_action_optional), type); pcmk__set_updated_flags(changed, first, pcmk__updated_first|pcmk__updated_then); } if(first_action && then_action) { changed |= then_child->cmds->update_ordered_actions(first_action, then_action, node, first_child->cmds->action_flags(first_action, node), filter, type, data_set); } else { crm_err("Nothing found either for %s (%p) or %s (%p) %s", first_child->id, first_action, then_child->id, then_action, task2text(task)); } } } if(children != then->rsc->children) { g_list_free(children); } return changed; } static bool can_interleave_actions(pe_action_t *first, pe_action_t *then) { bool interleave = FALSE; pe_resource_t *rsc = NULL; const char *interleave_s = NULL; if(first->rsc == NULL || then->rsc == NULL) { crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc == then->rsc) { crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) { crm_trace("Not interleaving %s with %s (both sides must be clones or bundles)", first->uuid, then->uuid); return FALSE; } if (pcmk__ends_with(then->uuid, "_stop_0") || pcmk__ends_with(then->uuid, "_demote_0")) { rsc = first->rsc; } else { rsc = then->rsc; } interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); crm_trace("Interleave %s -> %s: %s (based on %s)", first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id); return interleave; } /*! * \internal * \brief Update two actions according to an ordering between them * * Given information about an ordering of two actions, update the actions' * flags (and runnable_before members if appropriate) as appropriate for the * ordering. In some cases, the ordering could be disabled as well. 
* * \param[in,out] first 'First' action in an ordering * \param[in,out] then 'Then' action in an ordering * \param[in] node If not NULL, limit scope of ordering to this node * (only used when interleaving instances) * \param[in] flags Action flags for \p first for ordering purposes * \param[in] filter Action flags to limit scope of certain updates (may * include pe_action_optional to affect only mandatory * actions, and pe_action_runnable to affect only * runnable actions) * \param[in] type Group of enum pe_ordering flags to apply * \param[in,out] data_set Cluster working set * * \return Group of enum pcmk__updated flags indicating what was updated */ uint32_t pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set) { uint32_t changed = pcmk__updated_none; crm_trace("%s -> %s", first->uuid, then->uuid); if(can_interleave_actions(first, then)) { changed = multi_update_interleave_actions(first, then, node, filter, type, data_set); } else if(then->rsc) { GList *gIter = NULL; GList *children = NULL; // Handle the 'primitive' ordering case changed |= pcmk__update_ordered_actions(first, then, node, flags, filter, type, data_set); // Now any children (or containers in the case of a bundle) children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *then_child = (pe_resource_t *) gIter->data; uint32_t then_child_changed = pcmk__updated_none; pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node); if (then_child_action) { uint32_t then_child_flags = then_child->cmds->action_flags(then_child_action, node); if (pcmk_is_set(then_child_flags, pe_action_runnable)) { then_child_changed |= then_child->cmds->update_ordered_actions(first, then_child_action, node, flags, filter, type, data_set); } changed |= then_child_changed; if (pcmk_is_set(then_child_changed, pcmk__updated_then)) { for (GList *lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) { pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data; pcmk__update_action_for_orderings(next->action, data_set); } } } } if(children != then->rsc->children) { g_list_free(children); } } return changed; } void pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); pcmk__apply_location(rsc, constraint); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->container) { replica->container->cmds->apply_location(replica->container, constraint); } if (replica->ip) { replica->ip->cmds->apply_location(replica->ip, constraint); } } if (bundle_data->child && ((constraint->role_filter == RSC_ROLE_UNPROMOTED) || (constraint->role_filter == RSC_ROLE_PROMOTED))) { bundle_data->child->cmds->apply_location(bundle_data->child, constraint); bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location, constraint); } } /*! 
* \internal * \brief Add a resource's actions to the transition graph * - * \param[in] rsc Resource whose actions should be added + * \param[in,out] rsc Resource whose actions should be added */ void pcmk__bundle_expand(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); if (bundle_data->child) { bundle_data->child->cmds->add_actions_to_graph(bundle_data->child); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (replica->remote && replica->container && pe__bundle_needs_remote_name(replica->remote)) { /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that * run pacemaker-remoted inside, without needing a separate IP for * the container. This is done by configuring the inner remote's * connection host as the magic string "#uname", then * replacing it with the underlying host when needed. */ xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']", replica->remote->xml, LOG_ERR); const char *calculated_addr = NULL; // Replace the value in replica->remote->xml (if appropriate) calculated_addr = pe__add_bundle_remote_name(replica->remote, rsc->cluster, nvpair, "value"); if (calculated_addr) { /* Since this is for the bundle as a resource, and not any * particular action, replace the value in the default * parameters (not evaluated for node). create_graph_action() * will grab it from there to replace it in node-evaluated * parameters. */ GHashTable *params = pe_rsc_params(replica->remote, NULL, rsc->cluster); g_hash_table_replace(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR), strdup(calculated_addr)); } else { /* The only way to get here is if the remote connection is * neither currently running nor scheduled to run. That means we * won't be doing any operations that require addr (only start * requires it; we additionally use it to compare digests when * unpacking status, promote, and migrate_from history, but * that's already happened by this point). */ crm_info("Unable to determine address for bundle %s remote connection", rsc->id); } } if (replica->ip) { replica->ip->cmds->add_actions_to_graph(replica->ip); } if (replica->container) { replica->container->cmds->add_actions_to_graph(replica->container); } if (replica->remote) { replica->remote->cmds->add_actions_to_graph(replica->remote); } } } /*! 
* \internal * * \brief Schedule any probes needed for a resource on a node * - * \param[in] rsc Resource to create probe for - * \param[in] node Node to create probe on + * \param[in,out] rsc Resource to create probe for + * \param[in,out] node Node to create probe on * * \return true if any probe was created, otherwise false */ bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node) { bool any_created = false; pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return false); get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if ((replica->ip != NULL) && replica->ip->cmds->create_probe(replica->ip, node)) { any_created = true; } if ((replica->child != NULL) && (node->details == replica->node->details) && replica->child->cmds->create_probe(replica->child, node)) { any_created = true; } if ((replica->container != NULL) && replica->container->cmds->create_probe(replica->container, node)) { any_created = true; /* If we're limited to one replica per host (probably due to * the lack of an IP range), then we don't * want any of our peer containers starting until * we've established that no other copies are already * running. * * Partly this is to ensure that nreplicas_per_host is * observed, but also to ensure that the containers * don't fail to start because the necessary port * mappings (which won't include an IP for uniqueness) * are already taken. */ for (GList *tIter = bundle_data->replicas; tIter && (bundle_data->nreplicas_per_host == 1); tIter = tIter->next) { pe__bundle_replica_t *other = tIter->data; if ((other != replica) && (other != NULL) && (other->container != NULL)) { pcmk__new_ordering(replica->container, pcmk__op_key(replica->container->id, RSC_STATUS, 0), NULL, other->container, pcmk__op_key(other->container->id, RSC_START, 0), NULL, pe_order_optional|pe_order_same_node, rsc->cluster); } } } if ((replica->container != NULL) && (replica->remote != NULL) && replica->remote->cmds->create_probe(replica->remote, node)) { /* Do not probe the remote resource until we know where the * container is running. This is required for REMOTE_CONTAINER_HACK * to correctly probe remote resources.
*/ char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS, 0); pe_action_t *probe = find_first_action(replica->remote->actions, probe_uuid, NULL, node); free(probe_uuid); if (probe != NULL) { any_created = true; crm_trace("Ordering %s probe on %s", replica->remote->id, pe__node_name(node)); pcmk__new_ordering(replica->container, pcmk__op_key(replica->container->id, RSC_START, 0), NULL, replica->remote, NULL, probe, pe_order_probe, rsc->cluster); } } } return any_created; } void pcmk__output_bundle_actions(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (replica->ip != NULL) { replica->ip->cmds->output_actions(replica->ip); } if (replica->container != NULL) { replica->container->cmds->output_actions(replica->container); } if (replica->remote != NULL) { replica->remote->cmds->output_actions(replica->remote); } if (replica->child != NULL) { replica->child->cmds->output_actions(replica->child); } } } // Bundle implementation of resource_alloc_functions_t:add_utilization() void pcmk__bundle_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { pe__bundle_variant_data_t *bundle_data = NULL; pe__bundle_replica_t *replica = NULL; if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } get_bundle_variant_data(bundle_data, rsc); if (bundle_data->replicas == NULL) { return; } /* All bundle replicas are identical, so using the utilization of the first * is sufficient for any. Only the implicit container resource can have * utilization values. */ replica = (pe__bundle_replica_t *) bundle_data->replicas->data; if (replica->container != NULL) { replica->container->cmds->add_utilization(replica->container, orig_rsc, all_rscs, utilization); } } // Bundle implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__bundle_shutdown_lock(pe_resource_t *rsc) { return; // Bundles currently don't support shutdown locks } diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c index 884ba8f455..a887f7d6d6 100644 --- a/lib/pacemaker/pcmk_sched_clone.c +++ b/lib/pacemaker/pcmk_sched_clone.c @@ -1,1195 +1,1195 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include "libpacemaker_private.h" #define VARIANT_CLONE 1 #include static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all); static pe_node_t * can_run_instance(pe_resource_t * rsc, pe_node_t * node, int limit) { pe_node_t *local_node = NULL; if (node == NULL && rsc->allowed_nodes) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) { can_run_instance(rsc, local_node, limit); } return NULL; } if (!node) { /* make clang analyzer happy */ goto bail; } else if (!pcmk__node_available(node, false, false)) { goto bail; } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { goto bail; } local_node = pcmk__top_allowed_node(rsc, node); if (local_node == NULL) { crm_warn("%s cannot run on %s: node not allowed", rsc->id, pe__node_name(node)); goto bail; } else if (local_node->weight < 0) { common_update_score(rsc, node->details->id, local_node->weight); pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.", rsc->id, pe__node_name(node)); } else if (local_node->count < limit) { pe_rsc_trace(rsc, "%s can run on %s (already running %d)", rsc->id, pe__node_name(node), local_node->count); return local_node; } else { pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)", rsc->id, pe__node_name(node), local_node->count, limit); } bail: if (node) { common_update_score(rsc, node->details->id, -INFINITY); } return NULL; } static pe_node_t * allocate_instance(pe_resource_t *rsc, pe_node_t *prefer, gboolean all_coloc, int limit, pe_working_set_t *data_set) { pe_node_t *chosen = NULL; GHashTable *backup = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)", rsc->id, (prefer? prefer->details->uname: "none"), (all_coloc? "all" : "some")); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return rsc->fns->location(rsc, NULL, FALSE); } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } /* Only include positive colocation preferences of dependent resources * if not every node will get a copy of the clone */ append_parent_colocation(rsc->parent, rsc, all_coloc); if (prefer) { pe_node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (local_prefer == NULL || local_prefer->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id, pe__node_name(prefer)); return NULL; } } can_run_instance(rsc, NULL, limit); backup = pcmk__copy_node_table(rsc->allowed_nodes); pe_rsc_trace(rsc, "Allocating instance %s", rsc->id); chosen = rsc->cmds->assign(rsc, prefer); if (chosen && prefer && (chosen->details != prefer->details)) { crm_info("Not pre-allocating %s to %s because %s is better", rsc->id, pe__node_name(prefer), pe__node_name(chosen)); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = backup; pcmk__unassign_resource(rsc); chosen = NULL; backup = NULL; } if (chosen) { pe_node_t *local_node = pcmk__top_allowed_node(rsc, chosen); if (local_node) { local_node->count++; } else if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { /* what to do? 
we can't enforce per-node limits in this case */ pcmk__config_err("%s not found in %s (list of %d)", chosen->details->id, rsc->parent->id, g_hash_table_size(rsc->parent->allowed_nodes)); } } if(backup) { g_hash_table_destroy(backup); } return chosen; } static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all) { GList *gIter = NULL; gIter = rsc->rsc_cons; for (; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; if (all || cons->score < 0 || cons->score == INFINITY) { pcmk__add_this_with(child, cons); } } gIter = rsc->rsc_cons_lhs; for (; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; if (!pcmk__colocation_has_influence(cons, child)) { continue; } if (all || cons->score < 0) { pcmk__add_with_this(child, cons); } } } void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes, int max, int per_host_max, pe_working_set_t * data_set); void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes, int max, int per_host_max, pe_working_set_t * data_set) { int loop_max = 0; int allocated = 0; int available_nodes = 0; bool all_coloc = false; /* count now tracks the number of clones currently allocated */ for(GList *nIter = nodes; nIter != NULL; nIter = nIter->next) { pe_node_t *node = nIter->data; node->count = 0; if (pcmk__node_available(node, false, false)) { available_nodes++; } } all_coloc = (max < available_nodes) ? true : false; if(available_nodes) { loop_max = max / available_nodes; } if (loop_max < 1) { loop_max = 1; } pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)", max, rsc->id, available_nodes, per_host_max, loop_max); /* Pre-allocate as many instances as we can to their current location */ for (GList *gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; pe_node_t *child_node = NULL; pe_node_t *local_node = NULL; if ((child->running_on == NULL) || !pcmk_is_set(child->flags, pe_rsc_provisional) || pcmk_is_set(child->flags, pe_rsc_failed)) { continue; } child_node = pe__current_node(child); local_node = pcmk__top_allowed_node(child, child_node); pe_rsc_trace(rsc, "Checking pre-allocation of %s to %s (%d remaining of %d)", child->id, pe__node_name(child_node), max - allocated, max); if (!pcmk__node_available(child_node, true, false)) { pe_rsc_trace(rsc, "Not pre-allocating because %s can not run %s", pe__node_name(child_node), child->id); continue; } if ((local_node != NULL) && (local_node->count >= loop_max)) { pe_rsc_trace(rsc, "Not pre-allocating because %s already allocated " "optimal instances", pe__node_name(child_node)); continue; } if (allocate_instance(child, child_node, all_coloc, per_host_max, data_set)) { pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id, pe__node_name(child_node)); allocated++; } } pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max); for (GList *gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; if (child->running_on != NULL) { pe_node_t *child_node = pe__current_node(child); pe_node_t *local_node = pcmk__top_allowed_node(child, child_node); if (local_node == NULL) { crm_err("%s is running on %s which isn't allowed", child->id, pe__node_name(child_node)); } } if (!pcmk_is_set(child->flags, pe_rsc_provisional)) { } else if (allocated >= max) { pe_rsc_debug(rsc, "Child %s not 
allocated - limit reached %d %d", child->id, allocated, max); resource_location(child, NULL, -INFINITY, "clone:limit_reached", data_set); } else { if (allocate_instance(child, NULL, all_coloc, per_host_max, data_set)) { allocated++; } } } pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d", allocated, rsc->id, max); } /*! * \internal * \brief Assign a clone resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * * \return Node that \p rsc is assigned to, if assigned entirely to one node */ pe_node_t * pcmk__clone_allocate(pe_resource_t *rsc, const pe_node_t *prefer) { GList *nodes = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__add_promotion_scores(rsc); } pe__set_resource_flags(rsc, pe_rsc_allocating); /* This information is used by pcmk__cmp_instance() when deciding the order * in which to assign clone instances to nodes. */ for (GList *gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; pe_rsc_trace(rsc, "%s: Allocating %s first", rsc->id, constraint->primary->id); constraint->primary->cmds->assign(constraint->primary, prefer); } for (GList *gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; if (pcmk__colocation_has_influence(constraint, NULL)) { pe_resource_t *dependent = constraint->dependent; const char *attr = constraint->node_attribute; const float factor = constraint->score / (float) INFINITY; const uint32_t flags = pcmk__coloc_select_active |pcmk__coloc_select_nonnegative; pcmk__add_colocated_node_scores(dependent, rsc->id, &rsc->allowed_nodes, attr, factor, flags); } } pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = pcmk__sort_nodes(nodes, NULL); rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance); distribute_children(rsc, rsc->children, nodes, clone_data->clone_max, clone_data->clone_node_max, rsc->cluster); g_list_free(nodes); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__set_instance_roles(rsc); } pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating); pe_rsc_trace(rsc, "Done allocating %s", rsc->id); return NULL; } static void clone_update_pseudo_status(pe_resource_t * rsc, gboolean * stopping, gboolean * starting, gboolean * active) { GList *gIter = NULL; if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; clone_update_pseudo_status(child, stopping, starting, active); } return; } CRM_ASSERT(active != NULL); CRM_ASSERT(starting != NULL); CRM_ASSERT(stopping != NULL); if (rsc->running_on) { *active = TRUE; } gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (*starting && *stopping) { return; } else if (pcmk_is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid); continue; } else if (!pcmk_any_flags_set(action->flags, pe_action_pseudo|pe_action_runnable)) { 
pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid); continue; } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)) { pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid); *stopping = TRUE; } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)) { if (!pcmk_is_set(action->flags, pe_action_runnable)) { pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d", action->uuid, pcmk_is_set(action->flags, pe_action_runnable), pcmk_is_set(action->flags, pe_action_pseudo)); } else { pe_rsc_trace(rsc, "Starting due to: %s", action->uuid); pe_rsc_trace(rsc, "%s run=%d, pseudo=%d", action->uuid, pcmk_is_set(action->flags, pe_action_runnable), pcmk_is_set(action->flags, pe_action_pseudo)); *starting = TRUE; } } } } static pe_action_t * find_rsc_action(pe_resource_t *rsc, const char *task) { pe_action_t *match = NULL; GList *actions = pe__resource_actions(rsc, NULL, task, FALSE); for (GList *item = actions; item != NULL; item = item->next) { pe_action_t *op = (pe_action_t *) item->data; if (!pcmk_is_set(op->flags, pe_action_optional)) { if (match != NULL) { // More than one match, don't return any match = NULL; break; } match = op; } } g_list_free(actions); return match; } static void child_ordering_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) { pe_action_t *stop = NULL; pe_action_t *start = NULL; pe_action_t *last_stop = NULL; pe_action_t *last_start = NULL; GList *gIter = NULL; if (!pe__clone_is_ordered(rsc)) { return; } /* we have to maintain a consistent sorted child list when building order constraints */ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number); for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; stop = find_rsc_action(child, RSC_STOP); if (stop) { if (last_stop) { /* child/child relative stop */ order_actions(stop, last_stop, pe_order_optional); } last_stop = stop; } start = find_rsc_action(child, RSC_START); if (start) { if (last_start) { /* child/child relative start */ order_actions(last_start, start, pe_order_optional); } last_start = start; } } } void clone_create_actions(pe_resource_t *rsc) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_debug(rsc, "Creating actions for clone %s", rsc->id); clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify, &clone_data->stop_notify); child_ordering_constraints(rsc, rsc->cluster); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__create_promotable_actions(rsc); } } void clone_create_pseudo_actions(pe_resource_t *rsc, GList *children, notify_data_t **start_notify, notify_data_t **stop_notify) { gboolean child_active = FALSE; gboolean child_starting = FALSE; gboolean child_stopping = FALSE; gboolean allow_dependent_migrations = TRUE; pe_action_t *stop = NULL; pe_action_t *stopped = NULL; pe_action_t *start = NULL; pe_action_t *started = NULL; pe_rsc_trace(rsc, "Creating actions for %s", rsc->id); for (GList *gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; gboolean starting = FALSE; gboolean stopping = FALSE; child_rsc->cmds->create_actions(child_rsc); clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active); if (stopping && starting) { allow_dependent_migrations = FALSE; } child_stopping |= stopping; child_starting |= starting; } /* start */ start = pe__new_rsc_pseudo_action(rsc, RSC_START, !child_starting, true); started = pe__new_rsc_pseudo_action(rsc, 
RSC_STARTED, !child_starting, false); started->priority = INFINITY; if (child_active || child_starting) { pe__set_action_flags(started, pe_action_runnable); } if (start_notify != NULL && *start_notify == NULL) { *start_notify = pe__clone_notif_pseudo_ops(rsc, RSC_START, start, started); } /* stop */ stop = pe__new_rsc_pseudo_action(rsc, RSC_STOP, !child_stopping, true); stopped = pe__new_rsc_pseudo_action(rsc, RSC_STOPPED, !child_stopping, true); stopped->priority = INFINITY; if (allow_dependent_migrations) { pe__set_action_flags(stop, pe_action_migrate_runnable); } if (stop_notify != NULL && *stop_notify == NULL) { *stop_notify = pe__clone_notif_pseudo_ops(rsc, RSC_STOP, stop, stopped); if (start_notify && *start_notify && *stop_notify) { order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional); } } } void clone_internal_constraints(pe_resource_t *rsc) { pe_resource_t *last_rsc = NULL; GList *gIter; bool ordered = pe__clone_is_ordered(rsc); pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id); pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional); pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left); pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional); pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_runnable_left); } if (ordered) { /* we have to maintain a consistent sorted child list when building order constraints */ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->internal_constraints(child_rsc); pcmk__order_starts(rsc, child_rsc, pe_order_runnable_left|pe_order_implies_first_printed); pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed); if (ordered && (last_rsc != NULL)) { pcmk__order_starts(last_rsc, child_rsc, pe_order_optional); } pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed); pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed); if (ordered && (last_rsc != NULL)) { pcmk__order_stops(child_rsc, last_rsc, pe_order_optional); } last_rsc = child_rsc; } if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__order_promotable_instances(rsc); } } gboolean is_child_compatible(const pe_resource_t *child_rsc, const pe_node_t *local_node, enum rsc_role_e filter, gboolean current) { pe_node_t *node = NULL; enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); CRM_CHECK(child_rsc && local_node, return FALSE); if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { /* We only want instances that haven't failed */ node = child_rsc->fns->location(child_rsc, NULL, current); } if (filter != RSC_ROLE_UNKNOWN && next_role != filter) { crm_trace("Filtered %s", child_rsc->id); return FALSE; } if (node && (node->details == local_node->details)) { return TRUE; } else if (node) { crm_trace("%s - %s vs %s", child_rsc->id, pe__node_name(node), pe__node_name(local_node)); } else { crm_trace("%s - not allocated %d", child_rsc->id, current); } return FALSE; } pe_resource_t * find_compatible_child(const pe_resource_t *local_child, const pe_resource_t *rsc, enum rsc_role_e filter, gboolean current) { pe_resource_t *pair = NULL; GList 
*gIter = NULL; GList *scratch = NULL; pe_node_t *local_node = NULL; local_node = local_child->fns->location(local_child, NULL, current); if (local_node) { return find_compatible_child_by_node(local_child, local_node, rsc, filter, current); } scratch = g_hash_table_get_values(local_child->allowed_nodes); scratch = pcmk__sort_nodes(scratch, NULL); gIter = scratch; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pair = find_compatible_child_by_node(local_child, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id); done: g_list_free(scratch); return pair; } /*! * \internal * \brief Apply a colocation's score to node weights or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node weights (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__clone_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { GList *gIter = NULL; gboolean do_interleave = FALSE; const char *interleave_s = NULL; /* This should never be called for the clone itself as a dependent. Instead, * we add its colocation constraints to its instances and call the * apply_coloc_score() for the instances as dependents. */ CRM_ASSERT(!for_dependent); CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL), return); CRM_CHECK(dependent->variant == pe_native, return); pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d", colocation->id, dependent->id, primary->id, colocation->score); if (pcmk_is_set(primary->flags, pe_rsc_promotable)) { if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { // We haven't placed the primary yet, so we can't apply colocation pe_rsc_trace(primary, "%s is still provisional", primary->id); return; } else if (colocation->primary_role == RSC_ROLE_UNKNOWN) { // This isn't a role-specific colocation, so handle normally pe_rsc_trace(primary, "Handling %s as a clone colocation", colocation->id); } else if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) { // We're placing the dependent pcmk__update_dependent_with_promotable(primary, dependent, colocation); return; } else if (colocation->dependent_role == RSC_ROLE_PROMOTED) { // We're choosing roles for the dependent pcmk__update_promotable_dependent_priority(primary, dependent, colocation); return; } } // Only the dependent needs to be marked for interleave interleave_s = g_hash_table_lookup(colocation->dependent->meta, XML_RSC_ATTR_INTERLEAVE); if (crm_is_true(interleave_s) && (colocation->dependent->variant > pe_group)) { /* @TODO Do we actually care about multiple primary copies sharing a * dependent copy anymore?
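 *
 * Worked example (hypothetical resources and nodes): with clone-A as the
 * primary and an interleaved clone-B as the dependent, instances are
 * paired per node,
 *
 *     clone-B:0 with clone-A:0 on node1
 *     clone-B:1 with clone-A:1 on node2
 *
 * rather than every clone-B instance being colocated with clone-A as a
 * whole; hence the check below that both sides support the same number
 * of copies per node.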
*/ if (copies_per_node(colocation->dependent) != copies_per_node(colocation->primary)) { pcmk__config_err("Cannot interleave %s and %s because they do not " "support the same number of instances per node", colocation->dependent->id, colocation->primary->id); } else { do_interleave = TRUE; } } if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { pe_rsc_trace(primary, "%s is still provisional", primary->id); return; } else if (do_interleave) { pe_resource_t *primary_instance = NULL; primary_instance = find_compatible_child(dependent, primary, RSC_ROLE_UNKNOWN, FALSE); if (primary_instance != NULL) { pe_rsc_debug(primary, "Pairing %s with %s", dependent->id, primary_instance->id); dependent->cmds->apply_coloc_score(dependent, primary_instance, colocation, true); } else if (colocation->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", dependent->id, primary->id); pcmk__assign_resource(dependent, NULL, true); } else { pe_rsc_debug(primary, "Cannot pair %s with instance of %s", dependent->id, primary->id); } return; } else if (colocation->score >= INFINITY) { GList *affected_nodes = NULL; gIter = primary->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { pe_rsc_trace(primary, "Allowing %s: %s %d", colocation->id, pe__node_name(chosen), chosen->weight); affected_nodes = g_list_prepend(affected_nodes, chosen); } } node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE); g_list_free(affected_nodes); return; } gIter = primary->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->apply_coloc_score(dependent, child_rsc, colocation, false); } } enum action_tasks clone_child_action(pe_action_t * action) { enum action_tasks result = no_action; pe_resource_t *child = (pe_resource_t *) action->rsc->children->data; if (pcmk__strcase_any_of(action->task, "notify", "notified", NULL)) { /* Find the action we're notifying about instead */ int stop = 0; char *key = action->uuid; int lpc = strlen(key); for (; lpc > 0; lpc--) { if (key[lpc] == '_' && stop == 0) { stop = lpc; } else if (key[lpc] == '_') { char *task_mutable = NULL; lpc++; task_mutable = strdup(key + lpc); task_mutable[stop - lpc] = 0; crm_trace("Extracted action '%s' from '%s'", task_mutable, key); result = get_complex_task(child, task_mutable, TRUE); free(task_mutable); break; } } } else { result = get_complex_task(child, action->task, TRUE); } return result; } #define pe__clear_action_summary_flags(flags, action, flag) do { \ flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "Action summary", action->rsc->id, \ flags, flag, #flag); \ } while (0) enum pe_action_flags summary_action_flags(pe_action_t *action, GList *children, const pe_node_t *node) { GList *gIter = NULL; gboolean any_runnable = FALSE; gboolean check_runnable = TRUE; enum action_tasks task = clone_child_action(action); enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); const char *task_s = task2text(task); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_action_t *child_action = NULL; pe_resource_t *child = (pe_resource_t *) gIter->data; child_action = find_first_action(child->actions, NULL, task_s, child->children ? 
NULL : node); pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id, pe__node_name(node), child_action?child_action->uuid:"NA"); if (child_action) { enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); if (pcmk_is_set(flags, pe_action_optional) && !pcmk_is_set(child_flags, pe_action_optional)) { pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid, child_action->uuid); pe__clear_action_summary_flags(flags, action, pe_action_optional); pe__clear_action_flags(action, pe_action_optional); } if (pcmk_is_set(child_flags, pe_action_runnable)) { any_runnable = TRUE; } } } if (check_runnable && any_runnable == FALSE) { pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid); pe__clear_action_summary_flags(flags, action, pe_action_runnable); if (node == NULL) { pe__clear_action_flags(action, pe_action_runnable); } } return flags; } enum pe_action_flags clone_action_flags(pe_action_t *action, const pe_node_t *node) { return summary_action_flags(action, action->rsc->children, node); } void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { GList *gIter = rsc->children; pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); pcmk__apply_location(rsc, constraint); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->apply_location(child_rsc, constraint); } } /*! * \internal * \brief Add a resource's actions to the transition graph * - * \param[in] rsc Resource whose actions should be added + * \param[in,out] rsc Resource whose actions should be added */ void clone_expand(pe_resource_t *rsc) { GList *gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL); pe__create_notifications(rsc, clone_data->start_notify); pe__create_notifications(rsc, clone_data->stop_notify); pe__create_notifications(rsc, clone_data->promote_notify); pe__create_notifications(rsc, clone_data->demote_notify); /* Now that the notifications have been created, we can expand the children */ gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->add_actions_to_graph(child_rsc); } pcmk__add_rsc_actions_to_graph(rsc); /* The notifications are in the graph now, so we can destroy the notify_data */ pe__free_notification_data(clone_data->demote_notify); clone_data->demote_notify = NULL; pe__free_notification_data(clone_data->stop_notify); clone_data->stop_notify = NULL; pe__free_notification_data(clone_data->start_notify); clone_data->start_notify = NULL; pe__free_notification_data(clone_data->promote_notify); clone_data->promote_notify = NULL; } // Check whether a resource or any of its children is known on node static bool rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node) { if (rsc->children) { for (GList *child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) { pe_resource_t *child = (pe_resource_t *) child_iter->data; if (rsc_known_on(child, node)) { return TRUE; } } } else if (rsc->known_on) { GHashTableIter iter; pe_node_t *known_node = NULL; g_hash_table_iter_init(&iter, rsc->known_on); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) { if (node->details == known_node->details) { return TRUE; } } } return FALSE; } // Look for an instance of clone that is known on node static
pe_resource_t * find_instance_on(const pe_resource_t *clone, const pe_node_t *node) { for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; if (rsc_known_on(child, node)) { return child; } } return NULL; } // For anonymous clones, only a single instance needs to be probed static bool probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { // First, check if we probed an instance on this node last time pe_resource_t *child = find_instance_on(rsc, node); // Otherwise, check if we plan to start an instance on this node if (child == NULL) { for (GList *child_iter = rsc->children; child_iter && !child; child_iter = child_iter->next) { pe_node_t *local_node = NULL; pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data; if (child_rsc) { /* make clang analyzer happy */ local_node = child_rsc->fns->location(child_rsc, NULL, FALSE); if (local_node && (local_node->details == node->details)) { child = child_rsc; } } } } // Otherwise, use the first clone instance if (child == NULL) { child = rsc->children->data; } CRM_ASSERT(child); return child->cmds->create_probe(child, node); } /*! * \internal * * \brief Schedule any probes needed for a resource on a node * - * \param[in] rsc Resource to create probe for - * \param[in] node Node to create probe on + * \param[in,out] rsc Resource to create probe for + * \param[in,out] node Node to create probe on * * \return true if any probe was created, otherwise false */ bool clone_create_probe(pe_resource_t *rsc, pe_node_t *node) { CRM_ASSERT(rsc); rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number); if (rsc->children == NULL) { pe_warn("Clone %s has no children", rsc->id); return false; } if (rsc->exclusive_discover) { pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on * * remove the node from allowed_nodes so that the * notification contains only nodes that we might ever run * on */ g_hash_table_remove(rsc->allowed_nodes, node->details->id); /* Bit of a shortcut - might as well take it */ return false; } } if (pcmk_is_set(rsc->flags, pe_rsc_unique)) { return pcmk__probe_resource_list(rsc->children, node); } else { return probe_anonymous_clone(rsc, node, rsc->cluster); } } void clone_append_meta(const pe_resource_t *rsc, xmlNode *xml) { char *name = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); name = crm_meta_name(XML_RSC_ATTR_UNIQUE); crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique)); free(name); name = crm_meta_name(XML_RSC_ATTR_NOTIFY); crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify)); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX); crm_xml_add_int(xml, name, clone_data->clone_max); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX); crm_xml_add_int(xml, name, clone_data->clone_node_max); free(name); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { int promoted_max = pe__clone_promoted_max(rsc); int promoted_node_max = pe__clone_promoted_node_max(rsc); name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX); crm_xml_add_int(xml, name, promoted_max); free(name); name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX); crm_xml_add_int(xml, name, promoted_node_max); free(name); /* @COMPAT Maintain backward compatibility with resource 
agents that * expect the old names (deprecated since 2.0.0). */ name = crm_meta_name(PCMK_XA_PROMOTED_MAX_LEGACY); crm_xml_add_int(xml, name, promoted_max); free(name); name = crm_meta_name(PCMK_XA_PROMOTED_NODE_MAX_LEGACY); crm_xml_add_int(xml, name, promoted_node_max); free(name); } } // Clone implementation of resource_alloc_functions_t:add_utilization() void pcmk__clone_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { bool existing = false; pe_resource_t *child = NULL; if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } // Look for any child already existing in the list for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { child = (pe_resource_t *) iter->data; if (g_list_find(all_rscs, child)) { existing = true; // Keep checking remaining children } else { // If this is a clone of a group, look for group's members for (GList *member_iter = child->children; member_iter != NULL; member_iter = member_iter->next) { pe_resource_t *member = (pe_resource_t *) member_iter->data; if (g_list_find(all_rscs, member) != NULL) { // Add *child's* utilization, not group member's child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization); existing = true; break; } } } } if (!existing && (rsc->children != NULL)) { // If nothing was found, still add first child's utilization child = (pe_resource_t *) rsc->children->data; child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization); } } // Clone implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__clone_shutdown_lock(pe_resource_t *rsc) { return; // Clones currently don't support shutdown locks } diff --git a/lib/pacemaker/pcmk_sched_fencing.c b/lib/pacemaker/pcmk_sched_fencing.c index 827a7af08f..c9126402b6 100644 --- a/lib/pacemaker/pcmk_sched_fencing.c +++ b/lib/pacemaker/pcmk_sched_fencing.c @@ -1,498 +1,493 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include "libpacemaker_private.h" /*! * \internal * \brief Check whether a resource is known on a particular node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return TRUE if resource (or parent if an anonymous clone) is known */ static bool -rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node) +rsc_is_known_on(const pe_resource_t *rsc, const pe_node_t *node) { if (pe_hash_table_lookup(rsc->known_on, node->details->id)) { return TRUE; } else if ((rsc->variant == pe_native) && pe_rsc_is_anon_clone(rsc->parent) && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) { /* We check only the parent, not the uber-parent, because we cannot * assume that the resource is known if it is in an anonymously cloned * group (which may be only partially known). */ return TRUE; } return FALSE; } /*! 
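 */

/* Illustrative sketch (not compiled): how rsc_is_known_on() above could be
 * used to report which nodes already have probed state for a resource.
 * The helper name example_log_known_nodes is hypothetical.
 */
#if 0
static void
example_log_known_nodes(const pe_resource_t *rsc, pe_working_set_t *data_set)
{
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        const pe_node_t *node = (const pe_node_t *) iter->data;

        // TRUE if the resource (or anonymous clone parent) is known on node
        if (rsc_is_known_on(rsc, node)) {
            crm_trace("%s is known on %s", rsc->id, pe__node_name(node));
        }
    }
}
#endif

/*!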
* \internal * \brief Order a resource's start and promote actions relative to fencing * - * \param[in] rsc Resource to be ordered - * \param[in] stonith_op Fence action - * \param[in] data_set Cluster working set + * \param[in,out] rsc Resource to be ordered + * \param[in,out] stonith_op Fence action */ static void -order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op, - pe_working_set_t *data_set) +order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op) { pe_node_t *target; GList *gIter = NULL; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; switch (action->needs) { case rsc_req_nothing: // Anything other than start or promote requires nothing break; case rsc_req_stonith: order_actions(stonith_op, action, pe_order_optional); break; case rsc_req_quorum: if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id) && !rsc_is_known_on(rsc, target)) { /* If we don't know the status of the resource on the node * we're about to shoot, we have to assume it may be active * there. Order the resource start after the fencing. This * is analogous to waiting for all the probes for a resource * to complete before starting it. * * The most likely explanation is that the DC died and took * its status with it. */ pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, pe__node_name(target)); order_actions(stonith_op, action, pe_order_optional | pe_order_runnable_left); } break; } } } /*! * \internal * \brief Order a resource's stop and demote actions relative to fencing * - * \param[in] rsc Resource to be ordered - * \param[in] stonith_op Fence action - * \param[in] data_set Cluster working set + * \param[in,out] rsc Resource to be ordered + * \param[in,out] stonith_op Fence action */ static void -order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op, - pe_working_set_t *data_set) +order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op) { GList *gIter = NULL; GList *action_list = NULL; bool order_implicit = false; pe_resource_t *top = uber_parent(rsc); pe_action_t *parent_stop = NULL; pe_node_t *target; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; /* Get a list of stop actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE); /* If resource requires fencing, implicit actions must occur after fencing. * * Implied stops and demotes of resources running on guest nodes are always * ordered after fencing, even if the resource does not require fencing, * because guest node "fencing" is actually just a resource stop. */ if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing) || pe__is_guest_node(target)) { order_implicit = true; } if (action_list && order_implicit) { parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); } for (gIter = action_list; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; // The stop would never complete, so convert it into a pseudo-action. pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable); if (order_implicit) { pe__set_action_flags(action, pe_action_implied_by_stonith); /* Order the stonith before the parent stop (if any). * * Also order the stonith before the resource stop, unless the * resource is inside a bundle -- that would cause a graph loop. 
* We can rely on the parent stop's ordering instead. * * User constraints must not order a resource in a guest node * relative to the guest node container resource. The * pe_order_preserve flag marks constraints as generated by the * cluster and thus immune to that check (and is irrelevant if * target is not a guest). */ if (!pe_rsc_is_bundled(rsc)) { order_actions(stonith_op, action, pe_order_preserve); } order_actions(stonith_op, parent_stop, pe_order_preserve); } if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Stop of failed resource %s is implicit %s %s is fenced", rsc->id, (order_implicit? "after" : "because"), pe__node_name(target)); } else { crm_info("%s is implicit %s %s is fenced", action->uuid, (order_implicit? "after" : "because"), pe__node_name(target)); } if (pcmk_is_set(rsc->flags, pe_rsc_notify)) { pe__order_notifs_after_fencing(action, rsc, stonith_op); } #if 0 /* It might be a good idea to stop healthy resources on a node about to * be fenced, when possible. * * However, fencing must be done before a failed resource's * (pseudo-)stop action, so that could create a loop. For example, given * a group of A and B running on node N with a failed stop of B: * * fence N -> stop B (pseudo-op) -> stop A -> fence N * * The block below creates the stop A -> fence N ordering and therefore * must (at least for now) be disabled. Instead, run the block above and * treat all resources on N as B would be (i.e., as a pseudo-op after * the fencing). * * @TODO Maybe break the "A requires B" dependency in * pcmk__update_action_for_orderings() and use this block for healthy * resources instead of the above. */ crm_info("Moving healthy resource %s off %s before fencing", rsc->id, pe__node_name(node)); pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, strdup(CRM_OP_FENCE), stonith_op, - pe_order_optional, data_set); + pe_order_optional, rsc->cluster); #endif } g_list_free(action_list); /* Get a list of demote actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (!(action->node->details->online) || action->node->details->unclean || pcmk_is_set(rsc->flags, pe_rsc_failed)) { if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_info(rsc, "Demote of failed resource %s is implicit after %s is fenced", rsc->id, pe__node_name(target)); } else { pe_rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, pe__node_name(target)); } /* The demote would never complete and is now implied by the * fencing, so convert it into a pseudo-action. */ pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable); if (pe_rsc_is_bundled(rsc)) { // Do nothing, let recovery be ordered after parent's implied stop } else if (order_implicit) { order_actions(stonith_op, action, pe_order_preserve|pe_order_optional); } } } g_list_free(action_list); } /*! 
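 */

/* Illustrative sketch (not compiled): the core pattern used above to imply a
 * stop by fencing -- convert the stop to a pseudo-action (it can never really
 * run on the fenced node) and order it after the fence operation. The helper
 * name example_imply_stop is hypothetical.
 */
#if 0
static void
example_imply_stop(pe_action_t *stop, pe_action_t *stonith_op)
{
    // The stop would never complete, so make it a runnable pseudo-op
    pe__set_action_flags(stop, pe_action_pseudo|pe_action_runnable);

    // pe_order_preserve marks this ordering as cluster-generated
    order_actions(stonith_op, stop, pe_order_preserve);
}
#endif

/*!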
* \internal * \brief Order resource actions properly relative to fencing * - * \param[in] rsc Resource whose actions should be ordered - * \param[in] stonith_op Fencing operation to be ordered against - * \param[in] data_set Cluster working set + * \param[in,out] rsc Resource whose actions should be ordered + * \param[in,out] stonith_op Fencing operation to be ordered against */ static void -rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op, - pe_working_set_t *data_set) +rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op) { if (rsc->children) { GList *gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; - rsc_stonith_ordering(child_rsc, stonith_op, data_set); + rsc_stonith_ordering(child_rsc, stonith_op); } } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); } else { - order_start_vs_fencing(rsc, stonith_op, data_set); - order_stop_vs_fencing(rsc, stonith_op, data_set); + order_start_vs_fencing(rsc, stonith_op); + order_stop_vs_fencing(rsc, stonith_op); } } /*! * \internal * \brief Order all actions appropriately relative to a fencing operation * * Ensure start operations of affected resources are ordered after fencing, * imply stop and demote operations of affected resources by marking them as * pseudo-actions, etc. * - * \param[in] stonith_op Fencing operation + * \param[in,out] stonith_op Fencing operation * \param[in,out] data_set Working set of cluster */ void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set) { CRM_CHECK(stonith_op && data_set, return); for (GList *r = data_set->resources; r != NULL; r = r->next) { - rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op, data_set); + rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op); } } /*! * \internal * \brief Order an action after unfencing * - * \param[in] rsc Resource that action is for - * \param[in] node Node that action is on - * \param[in] action Action to be ordered after unfencing - * \param[in] order Ordering flags + * \param[in] rsc Resource that action is for + * \param[in,out] node Node that action is on + * \param[in,out] action Action to be ordered after unfencing + * \param[in] order Ordering flags */ void -pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, - enum pe_ordering order) +pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node, + pe_action_t *action, enum pe_ordering order) { /* When unfencing is in use, we order unfence actions before any probe or * start of resources that require unfencing, and also of fence devices. * * This might seem to violate the principle that fence devices require * only quorum. However, fence agents that unfence often don't have enough * information to even probe or start unless the node is first unfenced. */ if ((pcmk_is_set(rsc->flags, pe_rsc_fence_device) && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { /* Start with an optional ordering. Requiring unfencing would result in * the node being unfenced, and all its resources being stopped, * whenever a new resource is added -- which would be highly suboptimal. 
*/ pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, - rsc->cluster); + node->details->data_set); order_actions(unfence, action, order); if (!pcmk__node_unfenced(node)) { // But unfencing is required if it has never been done char *reason = crm_strdup_printf("required by %s %s", rsc->id, action->task); - trigger_unfencing(NULL, node, reason, NULL, rsc->cluster); + trigger_unfencing(NULL, node, reason, NULL, + node->details->data_set); free(reason); } } } /*! * \internal * \brief Create pseudo-op for guest node fence, and order relative to it * - * \param[in] node Guest node to fence + * \param[in,out] node Guest node to fence */ void pcmk__fence_guest(pe_node_t *node) { pe_resource_t *container = NULL; pe_action_t *stop = NULL; pe_action_t *stonith_op = NULL; /* The fence action is just a label; we don't do anything differently for * off vs. reboot. We specify it explicitly, rather than let it default to * cluster's default action, because we are not _initiating_ fencing -- we * are creating a pseudo-event to describe fencing that is already occurring * by other means (container recovery). */ const char *fence_action = "off"; CRM_ASSERT(node != NULL); /* Check whether guest's container resource has any explicit stop or * start (the stop may be implied by fencing of the guest's host). */ container = node->details->remote_rsc->container; if (container) { stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL); if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) { fence_action = "reboot"; } } /* Create a fence pseudo-event, so we have an event to order actions * against, and the controller can always detect it. */ stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", FALSE, node->details->data_set); pe__set_action_flags(stonith_op, pe_action_pseudo|pe_action_runnable); /* We want to imply stops/demotes after the guest is stopped, not wait until * it is restarted, so we always order pseudo-fencing after stop, not start * (even though start might be closer to what is done for a real reboot). */ if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) { pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, FALSE, node->details->data_set); crm_info("Implying guest %s is down (action %d) after %s fencing", pe__node_name(node), stonith_op->id, pe__node_name(stop->node)); order_actions(parent_stonith_op, stonith_op, pe_order_runnable_left|pe_order_implies_then); } else if (stop) { order_actions(stop, stonith_op, pe_order_runnable_left|pe_order_implies_then); crm_info("Implying guest %s is down (action %d) " "after container %s is stopped (action %d)", pe__node_name(node), stonith_op->id, container->id, stop->id); } else { /* If we're fencing the guest node but there's no stop for the guest * resource, we must think the guest is already stopped. However, we may * think so because its resource history was just cleaned. To avoid * unnecessarily considering the guest node down if it's really up, * order the pseudo-fencing after any stop of the connection resource, * which will be ordered after any container (re-)probe. */ stop = find_first_action(node->details->remote_rsc->actions, NULL, RSC_STOP, NULL); if (stop) { order_actions(stop, stonith_op, pe_order_optional); crm_info("Implying guest %s is down (action %d) " "after connection is stopped (action %d)", pe__node_name(node), stonith_op->id, stop->id); } else { /* Not sure why we're fencing, but everything must already be * cleanly stopped. 
*/ crm_info("Implying guest %s is down (action %d) ", pe__node_name(node), stonith_op->id); } } // Order/imply other actions relative to pseudo-fence as with real fence pcmk__order_vs_fence(stonith_op, node->details->data_set); } /*! * \internal * \brief Check whether node has already been unfenced * * \param[in] node Node to check * * \return true if node has a nonzero #node-unfenced attribute (or none), * otherwise false */ bool -pcmk__node_unfenced(pe_node_t *node) +pcmk__node_unfenced(const pe_node_t *node) { const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED); return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches); } /*! * \internal * \brief Order a resource's start and stop relative to unfencing of a node * - * \param[in] data Node that could be unfenced + * \param[in,out] data Node that could be unfenced * \param[in,out] user_data Resource to order */ void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data) { pe_node_t *node = (pe_node_t *) data; pe_resource_t *rsc = (pe_resource_t *) user_data; pe_action_t *unfence = pe_fence_op(node, "on", true, NULL, false, rsc->cluster); crm_debug("Ordering any stops of %s before %s, and any starts after", rsc->id, unfence->uuid); /* * It would be more efficient to order clone resources once, * rather than order each instance, but ordering the instance * allows us to avoid unnecessary dependencies that might conflict * with user constraints. * * @TODO: This constraint can still produce a transition loop if the * resource has a stop scheduled on the node being unfenced, and * there is a user ordering constraint to start some other resource * (which will be ordered after the unfence) before stopping this * resource. An example is "start some slow-starting cloned service * before stopping an associated virtual IP that may be moving to * it": * stop this -> unfencing -> start that -> stop this */ pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, strdup(unfence->uuid), unfence, pe_order_optional|pe_order_same_node, rsc->cluster); pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence, rsc, start_key(rsc), NULL, pe_order_implies_then_on_node|pe_order_same_node, rsc->cluster); } diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c index 1122092725..d7d5ba4616 100644 --- a/lib/pacemaker/pcmk_sched_nodes.c +++ b/lib/pacemaker/pcmk_sched_nodes.c @@ -1,352 +1,351 @@ /* - * Copyright 2004-2022 the Pacemaker project contributors + * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include // lrmd_event_data_t #include #include #include #include "libpacemaker_private.h" /*! 
* \internal * \brief Check whether a node is available to run resources * * \param[in] node Node to check * \param[in] consider_score If true, consider a negative score unavailable * \param[in] consider_guest If true, consider a guest node unavailable whose * resource will not be active * * \return true if node is online and not shutting down, unclean, or in standby * or maintenance mode, otherwise false */ bool pcmk__node_available(const pe_node_t *node, bool consider_score, bool consider_guest) { if ((node == NULL) || (node->details == NULL) || !node->details->online || node->details->shutdown || node->details->unclean || node->details->standby || node->details->maintenance) { return false; } if (consider_score && (node->weight < 0)) { return false; } // @TODO Go through all callers to see which should set consider_guest if (consider_guest && pe__is_guest_node(node)) { pe_resource_t *guest = node->details->remote_rsc->container; if (guest->fns->location(guest, NULL, FALSE) == NULL) { return false; } } return true; } /*! * \internal * \brief Copy a hash table of node objects * * \param[in] nodes Hash table to copy * * \return New copy of nodes (or NULL if nodes is NULL) */ GHashTable * pcmk__copy_node_table(GHashTable *nodes) { GHashTable *new_table = NULL; GHashTableIter iter; pe_node_t *node = NULL; if (nodes == NULL) { return NULL; } new_table = pcmk__strkey_table(NULL, free); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { pe_node_t *new_node = pe__copy_node(node); g_hash_table_insert(new_table, (gpointer) new_node->details->id, new_node); } return new_table; } /*! * \internal * \brief Copy a list of node objects * * \param[in] list List to copy * \param[in] reset Set copies' scores to 0 * * \return New list of shallow copies of nodes in original list */ GList * pcmk__copy_node_list(const GList *list, bool reset) { GList *result = NULL; for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) { pe_node_t *new_node = NULL; pe_node_t *this_node = (pe_node_t *) gIter->data; new_node = pe__copy_node(this_node); if (reset) { new_node->weight = 0; } result = g_list_prepend(result, new_node); } return result; } /*! * \internal * \brief Compare two nodes for allocation desirability * * Given two nodes, check which one is more preferred by allocation criteria * such as node weight and utilization. * * \param[in] a First node to compare * \param[in] b Second node to compare - * \param[in] data Sort data (as struct node_weight_s *) + * \param[in] data Node that resource being assigned is active on, if any * * \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are * equally preferred */ static gint compare_nodes(gconstpointer a, gconstpointer b, gpointer data) { const pe_node_t *node1 = (const pe_node_t *) a; const pe_node_t *node2 = (const pe_node_t *) b; - pe_node_t *active = (pe_node_t *) data; + const pe_node_t *active = (const pe_node_t *) data; int node1_weight = 0; int node2_weight = 0; int result = 0; if (a == NULL) { return 1; } if (b == NULL) { return -1; } // Compare node weights node1_weight = pcmk__node_available(node1, false, false)? node1->weight : -INFINITY; node2_weight = pcmk__node_available(node2, false, false)? 
node2->weight : -INFINITY; if (node1_weight > node2_weight) { crm_trace("%s (%d) > %s (%d) : weight", pe__node_name(node1), node1_weight, pe__node_name(node2), node2_weight); return -1; } if (node1_weight < node2_weight) { crm_trace("%s (%d) < %s (%d) : weight", pe__node_name(node1), node1_weight, pe__node_name(node2), node2_weight); return 1; } crm_trace("%s (%d) == %s (%d) : weight", pe__node_name(node1), node1_weight, pe__node_name(node2), node2_weight); // If appropriate, compare node utilization if (pcmk__str_eq(node1->details->data_set->placement_strategy, "minimal", pcmk__str_casei)) { goto equal; } if (pcmk__str_eq(node1->details->data_set->placement_strategy, "balanced", pcmk__str_casei)) { result = pcmk__compare_node_capacities(node1, node2); if (result < 0) { crm_trace("%s > %s : capacity (%d)", pe__node_name(node1), pe__node_name(node2), result); return -1; } else if (result > 0) { crm_trace("%s < %s : capacity (%d)", pe__node_name(node1), pe__node_name(node2), result); return 1; } } // Compare number of allocated resources if (node1->details->num_resources < node2->details->num_resources) { crm_trace("%s (%d) > %s (%d) : resources", pe__node_name(node1), node1->details->num_resources, pe__node_name(node2), node2->details->num_resources); return -1; } else if (node1->details->num_resources > node2->details->num_resources) { crm_trace("%s (%d) < %s (%d) : resources", pe__node_name(node1), node1->details->num_resources, pe__node_name(node2), node2->details->num_resources); return 1; } // Check whether one node is already running desired resource if (active != NULL) { if (active->details == node1->details) { crm_trace("%s (%d) > %s (%d) : active", pe__node_name(node1), node1->details->num_resources, pe__node_name(node2), node2->details->num_resources); return -1; } else if (active->details == node2->details) { crm_trace("%s (%d) < %s (%d) : active", pe__node_name(node1), node1->details->num_resources, pe__node_name(node2), node2->details->num_resources); return 1; } } // If all else is equal, prefer node with lowest-sorting name equal: crm_trace("%s = %s", pe__node_name(node1), pe__node_name(node2)); return strcmp(node1->details->uname, node2->details->uname); } /*! * \internal * \brief Sort a list of nodes by allocation desirability * - * \param[in] nodes Node list to sort - * \param[in] active_node If not NULL, node currently running resource - * \param[in] data_set Cluster working set + * \param[in,out] nodes Node list to sort + * \param[in] active_node Node where resource being assigned is active * * \return New head of sorted list */ GList * pcmk__sort_nodes(GList *nodes, pe_node_t *active_node) { return g_list_sort_with_data(nodes, compare_nodes, active_node); } /*! * \internal * \brief Check whether any node is available to run resources * * \param[in] nodes Nodes to check * * \return true if any node in \p nodes is available to run resources, * otherwise false */ bool pcmk__any_node_available(GHashTable *nodes) { GHashTableIter iter; - pe_node_t *node = NULL; + const pe_node_t *node = NULL; if (nodes == NULL) { return false; } g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (pcmk__node_available(node, true, false)) { return true; } } return false; } /*! 
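 */

/* Illustrative sketch (not compiled): combining pcmk__copy_node_list() and
 * pcmk__sort_nodes() above to log the most preferred node for a resource that
 * is active on active_node (may be NULL). The helper name
 * example_log_best_node is hypothetical.
 */
#if 0
static void
example_log_best_node(GList *nodes, pe_node_t *active_node)
{
    // Shallow-copy so sorting does not disturb the caller's list
    GList *sorted = pcmk__copy_node_list(nodes, false);

    sorted = pcmk__sort_nodes(sorted, active_node);
    if (sorted != NULL) {
        // Head of the sorted list is the most preferred node
        crm_trace("Most preferred node is %s",
                  pe__node_name((pe_node_t *) sorted->data));
    }
    g_list_free_full(sorted, free); // Copies were allocated by pe__copy_node()
}
#endif

/*!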
* \internal * \brief Apply node health values for all nodes in cluster * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ void pcmk__apply_node_health(pe_working_set_t *data_set) { int base_health = 0; enum pcmk__health_strategy strategy; const char *strategy_str = pe_pref(data_set->config_hash, PCMK__OPT_NODE_HEALTH_STRATEGY); strategy = pcmk__parse_health_strategy(strategy_str); if (strategy == pcmk__health_strategy_none) { return; } crm_info("Applying node health strategy '%s'", strategy_str); // The progressive strategy can use a base health score if (strategy == pcmk__health_strategy_progressive) { base_health = pe__health_score(PCMK__OPT_NODE_HEALTH_BASE, data_set); } for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; int health = pe__sum_node_health_scores(node, base_health); // An overall health score of 0 has no effect if (health == 0) { continue; } crm_info("Overall system health of %s is %d", pe__node_name(node), health); // Use node health as a location score for each resource on the node for (GList *r = data_set->resources; r != NULL; r = r->next) { pe_resource_t *rsc = (pe_resource_t *) r->data; bool constrain = true; if (health < 0) { /* Negative health scores do not apply to resources with * allow-unhealthy-nodes=true. */ constrain = !crm_is_true(g_hash_table_lookup(rsc->meta, PCMK__META_ALLOW_UNHEALTHY_NODES)); } if (constrain) { pcmk__new_location(strategy_str, rsc, health, NULL, node, data_set); } else { pe_rsc_trace(rsc, "%s is immune from health ban on %s", rsc->id, pe__node_name(node)); } } } } /*! * \internal * \brief Check for a node in a resource's parent's allowed nodes * * \param[in] rsc Resource whose parent should be checked * \param[in] node Node to check for * * \return Equivalent of \p node from \p rsc's parent's allowed nodes if any, * otherwise NULL */ pe_node_t * pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node) { GHashTable *allowed_nodes = NULL; if ((rsc == NULL) || (node == NULL)) { return NULL; } else if (rsc->parent == NULL) { allowed_nodes = rsc->allowed_nodes; } else { allowed_nodes = rsc->parent->allowed_nodes; } return pe_hash_table_lookup(allowed_nodes, node->details->id); } diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c index d04e704c39..f8ba29337a 100644 --- a/lib/pacemaker/pcmk_sched_primitive.c +++ b/lib/pacemaker/pcmk_sched_primitive.c @@ -1,1519 +1,1518 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include "libpacemaker_private.h" static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional); static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional); static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the immediate next role when transitioning from one role * to a target role. 
For example, when going from Stopped to Promoted, the * next role is Unpromoted, because the resource must be started before it * can be promoted. The current state then becomes Started, which is fed * into this array again, giving a next role of Promoted. * * Current role Immediate next role Final target role * ------------ ------------------- ----------------- */ /* Unknown */ { RSC_ROLE_UNKNOWN, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STOPPED, /* Started */ RSC_ROLE_STOPPED, /* Unpromoted */ RSC_ROLE_STOPPED, /* Promoted */ }, /* Stopped */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STARTED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_UNPROMOTED, /* Promoted */ }, /* Started */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STARTED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_PROMOTED, /* Promoted */ }, /* Unpromoted */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_STOPPED, /* Stopped */ RSC_ROLE_STOPPED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_PROMOTED, /* Promoted */ }, /* Promoted */ { RSC_ROLE_STOPPED, /* Unknown */ RSC_ROLE_UNPROMOTED, /* Stopped */ RSC_ROLE_UNPROMOTED, /* Started */ RSC_ROLE_UNPROMOTED, /* Unpromoted */ RSC_ROLE_PROMOTED, /* Promoted */ }, }; /*! * \internal * \brief Function to schedule actions needed for a role change * * \param[in,out] rsc Resource whose role is changing - * \param[in] node Node where resource will be in its next role + * \param[in,out] node Node where resource will be in its next role * \param[in] optional Whether scheduled actions should be optional */ typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node, bool optional); static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { /* This array lists the function needed to transition directly from one role * to another. NULL indicates that nothing is needed. * * Current role Transition function Next role * ------------ ------------------- ---------- */ /* Unknown */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ assert_role_error, /* Started */ assert_role_error, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Stopped */ { assert_role_error, /* Unknown */ NULL, /* Stopped */ start_resource, /* Started */ start_resource, /* Unpromoted */ assert_role_error, /* Promoted */ }, /* Started */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ NULL, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Unpromoted */ { assert_role_error, /* Unknown */ stop_resource, /* Stopped */ stop_resource, /* Started */ NULL, /* Unpromoted */ promote_resource, /* Promoted */ }, /* Promoted */ { assert_role_error, /* Unknown */ demote_resource, /* Stopped */ demote_resource, /* Started */ demote_resource, /* Unpromoted */ NULL, /* Promoted */ }, }; /*! * \internal * \brief Get a list of a resource's allowed nodes sorted by node weight * * \param[in] rsc Resource to check * * \return List of allowed nodes sorted by node weight */ static GList * sorted_allowed_nodes(const pe_resource_t *rsc) { if (rsc->allowed_nodes != NULL) { GList *nodes = g_hash_table_get_values(rsc->allowed_nodes); if (nodes != NULL) { return pcmk__sort_nodes(nodes, pe__current_node(rsc)); } } return NULL; } /*! 
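 */

/* Illustrative sketch (not compiled): walking rsc_state_matrix above to
 * enumerate the intermediate roles between a current role and a target role
 * (for example, Stopped -> Unpromoted -> Promoted). Assumes target is a
 * reachable role (the scheduler never targets Unknown). The helper name
 * example_trace_role_path is hypothetical.
 */
#if 0
static void
example_trace_role_path(enum rsc_role_e current, enum rsc_role_e target)
{
    while (current != target) {
        enum rsc_role_e next = rsc_state_matrix[current][target];

        crm_trace("Next role on the way from %s to %s is %s",
                  role2text(current), role2text(target), role2text(next));
        current = next;
    }
}
#endif

/*!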
* \internal * \brief Assign a resource to its best allowed node, if possible * * \param[in,out] rsc Resource to choose a node for * \param[in] prefer If not NULL, prefer this node when all else equal * * \return true if \p rsc could be assigned to a node, otherwise false */ static bool assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer) { GList *nodes = NULL; pe_node_t *chosen = NULL; pe_node_t *best = NULL; bool result = false; const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc); if (prefer == NULL) { prefer = most_free_node; } if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { // We've already finished assignment of resources to nodes return rsc->allocated_to != NULL; } // Sort allowed nodes by weight nodes = sorted_allowed_nodes(rsc); if (nodes != NULL) { best = (pe_node_t *) nodes->data; // First node has best score } if ((prefer != NULL) && (nodes != NULL)) { // Get the allowed node version of prefer chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (chosen == NULL) { pe_rsc_trace(rsc, "Preferred node %s for %s was unknown", pe__node_name(prefer), rsc->id); /* Favor the preferred node as long as its weight is at least as good as * the best allowed node's. * * An alternative would be to favor the preferred node even if the best * node is better, when the best node's weight is less than INFINITY. */ } else if (chosen->weight < best->weight) { pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable", pe__node_name(chosen), rsc->id); chosen = NULL; } else if (!pcmk__node_available(chosen, true, false)) { pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable", pe__node_name(chosen), rsc->id); chosen = NULL; } else { pe_rsc_trace(rsc, "Chose preferred node %s for %s (ignoring %d candidates)", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } } if ((chosen == NULL) && (best != NULL)) { /* Either there is no preferred node, or the preferred node is not * suitable, but another node is allowed to run the resource. */ chosen = best; if (!pe_rsc_is_unique_clone(rsc->parent) && (chosen->weight > 0) // Zero not acceptable && pcmk__node_available(chosen, false, false)) { /* If the resource is already running on a node, prefer that node if * it is just as good as the chosen node. * * We don't do this for unique clone instances, because * distribute_children() has already assigned instances to their * running nodes when appropriate, and if we get here, we don't want * remaining unassigned instances to prefer a node that's already * running another instance. */ pe_node_t *running = pe__current_node(rsc); if (running == NULL) { // Nothing to do } else if (!pcmk__node_available(running, true, false)) { pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources", rsc->id, pe__node_name(running)); } else { int nodes_with_best_score = 1; for (GList *iter = nodes->next; iter; iter = iter->next) { pe_node_t *allowed = (pe_node_t *) iter->data; if (allowed->weight != chosen->weight) { // The nodes are sorted by weight, so no more are equal break; } if (pe__same_node(allowed, running)) { // Scores are equal, so prefer the current node chosen = allowed; } nodes_with_best_score++; } if (nodes_with_best_score > 1) { do_crm_log(((chosen->weight >= INFINITY)? 
LOG_WARNING : LOG_INFO), "Chose %s for %s from %d nodes with score %s", pe__node_name(chosen), rsc->id, nodes_with_best_score, pcmk_readable_score(chosen->weight)); } } } pe_rsc_trace(rsc, "Chose %s for %s from %d candidates", pe__node_name(chosen), rsc->id, g_list_length(nodes)); } result = pcmk__finalize_assignment(rsc, chosen, false); g_list_free(nodes); return result; } /*! * \internal * \brief Apply a "this with" colocation to a node's allowed node scores * * \param[in,out] data Colocation to apply * \param[in,out] user_data Resource being assigned */ static void apply_this_with(gpointer data, gpointer user_data) { pcmk__colocation_t *colocation = (pcmk__colocation_t *) data; pe_resource_t *rsc = (pe_resource_t *) user_data; GHashTable *archive = NULL; pe_resource_t *other = colocation->primary; // In certain cases, we will need to revert the node scores if ((colocation->dependent_role >= RSC_ROLE_PROMOTED) || ((colocation->score < 0) && (colocation->score > -INFINITY))) { archive = pcmk__copy_node_table(rsc->allowed_nodes); } pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first " "(score=%d role=%s)", rsc->id, colocation->id, other->id, colocation->score, role2text(colocation->dependent_role)); other->cmds->assign(other, NULL); // Apply the colocation score to this resource's allowed node scores rsc->cmds->apply_coloc_score(rsc, other, colocation, true); if ((archive != NULL) && !pcmk__any_node_available(rsc->allowed_nodes)) { pe_rsc_info(rsc, "%s: Reverting scores from colocation with %s " "because no nodes allowed", rsc->id, other->id); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = archive; archive = NULL; } if (archive != NULL) { g_hash_table_destroy(archive); } } /*! * \internal * \brief Apply a "with this" colocation to a node's allowed node scores * * \param[in,out] data Colocation to apply * \param[in,out] user_data Resource being assigned */ static void apply_with_this(void *data, void *user_data) { pcmk__colocation_t *colocation = (pcmk__colocation_t *) data; pe_resource_t *rsc = (pe_resource_t *) user_data; pe_resource_t *other = colocation->dependent; const float factor = colocation->score / (float) INFINITY; if (!pcmk__colocation_has_influence(colocation, NULL)) { return; } pe_rsc_trace(rsc, "%s: Incorporating attenuated %s assignment scores due " "to colocation %s", rsc->id, other->id, colocation->id); pcmk__add_colocated_node_scores(other, rsc->id, &rsc->allowed_nodes, colocation->node_attribute, factor, pcmk__coloc_select_active); } /*! * \internal * \brief Update a Pacemaker Remote node once its connection has been assigned * * \param[in] connection Connection resource that has been assigned */ static void remote_connection_assigned(const pe_resource_t *connection) { pe_node_t *remote_node = pe_find_node(connection->cluster->nodes, connection->id); CRM_CHECK(remote_node != NULL, return); if ((connection->allocated_to != NULL) && (connection->next_role != RSC_ROLE_STOPPED)) { crm_trace("Pacemaker Remote node %s will be online", remote_node->details->id); remote_node->details->online = TRUE; if (remote_node->details->unseen) { // Avoid unnecessary fence, since we will attempt connection remote_node->details->unclean = FALSE; } } else { crm_trace("Pacemaker Remote node %s will be shut down " "(%sassigned connection's next role is %s)", remote_node->details->id, ((connection->allocated_to == NULL)? "un" : ""), role2text(connection->next_role)); remote_node->details->shutdown = TRUE; } } /*!
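 */

/* Illustrative sketch (not compiled): how a caller could request assignment
 * through the resource's method table, which is how
 * pcmk__primitive_assign() below is normally reached. The helper name
 * example_assign is hypothetical.
 */
#if 0
static void
example_assign(pe_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        // No preferred node; assign() returns the chosen node (or NULL)
        pe_node_t *chosen = rsc->cmds->assign(rsc, NULL);

        crm_trace("%s assigned to %s", rsc->id,
                  (chosen == NULL)? "no node" : pe__node_name(chosen));
    }
}
#endif

/*!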
* \internal * \brief Assign a primitive resource to a node * * \param[in,out] rsc Resource to assign to a node * \param[in] prefer Node to prefer, if all else is equal * * \return Node that \p rsc is assigned to, if assigned entirely to one node */ pe_node_t * pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer) { CRM_ASSERT(rsc != NULL); // Never assign a child without parent being assigned first if ((rsc->parent != NULL) && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "%s: Assigning parent %s first", rsc->id, rsc->parent->id); rsc->parent->cmds->assign(rsc->parent, prefer); } if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return rsc->allocated_to; // Assignment has already been done } // Ensure we detect assignment loops if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id); return NULL; } pe__set_resource_flags(rsc, pe_rsc_allocating); pe__show_node_weights(true, rsc, "Pre-assignment", rsc->allowed_nodes, rsc->cluster); g_list_foreach(rsc->rsc_cons, apply_this_with, rsc); pe__show_node_weights(true, rsc, "Post-this-with", rsc->allowed_nodes, rsc->cluster); g_list_foreach(rsc->rsc_cons_lhs, apply_with_this, rsc); if (rsc->next_role == RSC_ROLE_STOPPED) { pe_rsc_trace(rsc, "Banning %s from all nodes because it will be stopped", rsc->id); resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, rsc->cluster); } else if ((rsc->next_role > rsc->role) && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum) && (rsc->cluster->no_quorum_policy == no_quorum_freeze)) { crm_notice("Resource %s cannot be elevated from %s to %s due to " "no-quorum-policy=freeze", rsc->id, role2text(rsc->role), role2text(rsc->next_role)); pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze"); } pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, rsc->cluster); // Unmanage resource if fencing is enabled but no device is configured if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled) && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) { pe__clear_resource_flags(rsc, pe_rsc_managed); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { // Unmanaged resources stay on their current node const char *reason = NULL; pe_node_t *assign_to = NULL; pe__set_next_role(rsc, rsc->role, "unmanaged"); assign_to = pe__current_node(rsc); if (assign_to == NULL) { reason = "inactive"; } else if (rsc->role == RSC_ROLE_PROMOTED) { reason = "promoted"; } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { reason = "failed"; } else { reason = "active"; } pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id, (assign_to? 
assign_to->details->uname : "no node"), reason); pcmk__finalize_assignment(rsc, assign_to, true); } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) { pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id); pcmk__finalize_assignment(rsc, NULL, true); } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional) && assign_best_node(rsc, prefer)) { // Assignment successful } else if (rsc->allocated_to == NULL) { if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) { pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id); } else if (rsc->running_on != NULL) { pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id); } } else { pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, pe__node_name(rsc->allocated_to)); } pe__clear_resource_flags(rsc, pe_rsc_allocating); if (rsc->is_remote_node) { remote_connection_assigned(rsc); } return rsc->allocated_to; } /*! * \internal * \brief Schedule actions to bring resource down and back to current role * * \param[in,out] rsc Resource to restart - * \param[in] current Node that resource should be brought down on + * \param[in,out] current Node that resource should be brought down on * \param[in] need_stop Whether the resource must be stopped * \param[in] need_promote Whether the resource must be promoted * * \return Role that resource would have after scheduled actions are taken */ static void schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current, bool need_stop, bool need_promote) { enum rsc_role_e role = rsc->role; enum rsc_role_e next_role; rsc_transition_fn fn = NULL; pe__set_resource_flags(rsc, pe_rsc_restarting); // Bring resource down to a stop on its current node while (role != RSC_ROLE_STOPPED) { next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED]; pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s", (need_stop? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, current, !need_stop); role = next_role; } // Bring resource up to its next role on its next node while ((rsc->role <= rsc->next_role) && (role != rsc->role) && !pcmk_is_set(rsc->flags, pe_rsc_block)) { bool required = need_stop; next_role = rsc_state_matrix[role][rsc->role]; if ((next_role == RSC_ROLE_PROMOTED) && need_promote) { required = true; } pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s", (required? "required" : "optional"), rsc->id, role2text(role), role2text(next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, !required); role = next_role; } pe__clear_resource_flags(rsc, pe_rsc_restarting); } /*! * \internal * \brief If a resource's next role is not explicitly specified, set a default * * \param[in,out] rsc Resource to set next role for * * \return "explicit" if next role was explicitly set, otherwise "implicit" */ static const char * set_default_next_role(pe_resource_t *rsc) { if (rsc->next_role != RSC_ROLE_UNKNOWN) { return "explicit"; } if (rsc->allocated_to == NULL) { pe__set_next_role(rsc, RSC_ROLE_STOPPED, "assignment"); } else { pe__set_next_role(rsc, RSC_ROLE_STARTED, "assignment"); } return "implicit"; } /*! 
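 */

/* Illustrative sketch (not compiled): the defaulting rule implemented by
 * set_default_next_role() above -- an unassigned resource defaults to
 * Stopped, an assigned one to Started. The helper name
 * example_show_next_role is hypothetical.
 */
#if 0
static void
example_show_next_role(pe_resource_t *rsc)
{
    const char *source = set_default_next_role(rsc);

    crm_trace("%s next role is %s (%s)", rsc->id,
              role2text(rsc->next_role), source);
}
#endif

/*!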
* \internal * \brief Create an action to represent an already pending start * * \param[in,out] rsc Resource to create start action for */ static void create_pending_start(pe_resource_t *rsc) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating action for %s to represent already pending start", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); pe__set_action_flags(start, pe_action_print_always); } /*! * \internal * \brief Schedule actions needed to take a resource to its next role * * \param[in,out] rsc Resource to schedule actions for */ static void schedule_role_transition_actions(pe_resource_t *rsc) { enum rsc_role_e role = rsc->role; while (role != rsc->next_role) { enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role]; rsc_transition_fn fn = NULL; pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)", rsc->id, role2text(role), role2text(next_role), role2text(rsc->next_role)); fn = rsc_action_matrix[role][next_role]; if (fn == NULL) { break; } fn(rsc, rsc->allocated_to, false); role = next_role; } } /*! * \internal * \brief Create all actions needed for a given primitive resource * * \param[in,out] rsc Primitive resource to create actions for */ void pcmk__primitive_create_actions(pe_resource_t *rsc) { bool need_stop = false; bool need_promote = false; bool is_moving = false; bool allow_migrate = false; bool multiply_active = false; pe_node_t *current = NULL; unsigned int num_all_active = 0; unsigned int num_clean_active = 0; const char *next_role_source = NULL; CRM_ASSERT(rsc != NULL); next_role_source = set_default_next_role(rsc); pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s " "(%s) on %s", rsc->id, role2text(rsc->role), role2text(rsc->next_role), next_role_source, pe__node_name(rsc->allocated_to)); current = pe__find_active_on(rsc, &num_all_active, &num_clean_active); g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration, rsc); if ((current != NULL) && (rsc->allocated_to != NULL) && (current->details != rsc->allocated_to->details) && (rsc->next_role >= RSC_ROLE_STARTED)) { pe_rsc_trace(rsc, "Moving %s from %s to %s", rsc->id, pe__node_name(current), pe__node_name(rsc->allocated_to)); is_moving = true; allow_migrate = pcmk__rsc_can_migrate(rsc, current); // This is needed even if migrating (though I'm not sure why ...) need_stop = true; } // Check whether resource is partially migrated and/or multiply active if ((rsc->partial_migration_source != NULL) && (rsc->partial_migration_target != NULL) && allow_migrate && (num_all_active == 2) && pe__same_node(current, rsc->partial_migration_source) && pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) { /* A partial migration is in progress, and the migration target remains * the same as when the migration began. */ pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else if ((rsc->partial_migration_source != NULL) || (rsc->partial_migration_target != NULL)) { // A partial migration is in progress but can't be continued if (num_all_active > 2) { // The resource is migrating *and* multiply active! 
crm_notice("Forcing recovery of %s because it is migrating " "from %s to %s and possibly active elsewhere", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } else { // The migration source or target isn't available crm_notice("Forcing recovery of %s because it can no longer " "migrate from %s to %s", rsc->id, pe__node_name(rsc->partial_migration_source), pe__node_name(rsc->partial_migration_target)); } need_stop = true; rsc->partial_migration_source = rsc->partial_migration_target = NULL; allow_migrate = false; } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) { multiply_active = (num_all_active > 1); } else { /* If a resource has "requires" set to nothing or quorum, don't consider * it active on unclean nodes (similar to how all resources behave when * stonith-enabled is false). We can start such resources elsewhere * before fencing completes, and if we considered the resource active on * the failed node, we would attempt recovery for being active on * multiple nodes. */ multiply_active = (num_clean_active > 1); } if (multiply_active) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Resource was (possibly) incorrectly multiply active pe_proc_err("%s resource %s might be active on %u nodes (%s)", pcmk__s(class, "Untyped"), rsc->id, num_all_active, recovery2text(rsc->recovery_type)); crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ" "#Resource_is_Too_Active for more information"); switch (rsc->recovery_type) { case recovery_stop_start: need_stop = true; break; case recovery_stop_unexpected: need_stop = true; // stop_resource() will skip expected node pe__set_resource_flags(rsc, pe_rsc_stop_unexpected); break; default: break; } } else { pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected); } if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) { create_pending_start(rsc); } if (is_moving) { // Remaining tests are only for resources staying where they are } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { if (pcmk_is_set(rsc->flags, pe_rsc_stop)) { need_stop = true; pe_rsc_trace(rsc, "Recovering %s", rsc->id); } else { pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); if (rsc->next_role == RSC_ROLE_PROMOTED) { need_promote = true; } } } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id); need_stop = true; } else if ((rsc->role > RSC_ROLE_STARTED) && (current != NULL) && (rsc->allocated_to != NULL)) { pe_action_t *start = NULL; pe_rsc_trace(rsc, "Creating start action for promoted resource %s", rsc->id); start = start_action(rsc, rsc->allocated_to, TRUE); if (!pcmk_is_set(start->flags, pe_action_optional)) { // Recovery of a promoted resource pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id); need_stop = true; } } // Create any actions needed to bring resource down and back up to same role schedule_restart_actions(rsc, current, need_stop, need_promote); // Create any actions needed to take resource from this role to the next schedule_role_transition_actions(rsc); pcmk__create_recurring_actions(rsc); if (allow_migrate) { pcmk__create_migration_actions(rsc, current); } } /*! 
* \internal * \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes * * \param[in] rsc Resource to check */ static void rsc_avoids_remote_nodes(const pe_resource_t *rsc) { GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (node->details->remote_rsc != NULL) { node->weight = -INFINITY; } } } /*! * \internal * \brief Return allowed nodes as (possibly sorted) list * * Convert a resource's hash table of allowed nodes to a list. If printing to * stdout, sort the list, to keep action ID numbers consistent for regression * test output (while avoiding the performance hit on a live cluster). * * \param[in] rsc Resource to check for allowed nodes * * \return List of resource's allowed nodes * \note Callers should take care not to rely on the list being sorted. */ static GList * allowed_nodes_as_list(const pe_resource_t *rsc) { GList *allowed_nodes = NULL; if (rsc->allowed_nodes) { allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes); } if (!pcmk__is_daemon) { allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name); } return allowed_nodes; } /*! * \internal * \brief Create implicit constraints needed for a primitive resource * * \param[in,out] rsc Primitive resource to create implicit constraints for */ void pcmk__primitive_internal_constraints(pe_resource_t *rsc) { GList *allowed_nodes = NULL; bool check_unfencing = false; bool check_utilization = false; CRM_ASSERT(rsc != NULL); if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping implicit constraints for unmanaged resource %s", rsc->id); return; } // Whether resource requires unfencing check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device) && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing) && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing); // Whether a non-default placement strategy is used check_utilization = (g_hash_table_size(rsc->utilization) > 0) && !pcmk__str_eq(rsc->cluster->placement_strategy, "default", pcmk__str_casei); // Order stops before starts (i.e. 
restart) pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, pe_order_optional|pe_order_implies_then|pe_order_restart, rsc->cluster); // Promotable ordering: demote before stop, start before promote if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_UNPROMOTED)) { pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, pe_order_promoted_implies_first, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL, pe_order_runnable_left, rsc->cluster); } // Don't clear resource history if probing on same node pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL, pe_order_same_node|pe_order_then_cancels_first, rsc->cluster); // Certain checks need allowed nodes if (check_unfencing || check_utilization || (rsc->container != NULL)) { allowed_nodes = allowed_nodes_as_list(rsc); } if (check_unfencing) { g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc); } if (check_utilization) { pcmk__create_utilization_constraints(rsc, allowed_nodes); } if (rsc->container != NULL) { pe_resource_t *remote_rsc = NULL; if (rsc->is_remote_node) { // rsc is the implicit remote connection for a guest or bundle node /* Guest resources are not allowed to run on Pacemaker Remote nodes, * to avoid nesting remotes. However, bundles are allowed. */ if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { rsc_avoids_remote_nodes(rsc->container); } /* If someone cleans up a guest or bundle node's container, we will * likely schedule a (re-)probe of the container and recovery of the * connection. Order the connection stop after the container probe, * so that if we detect the container running, we will trigger a new * transition and avoid the unnecessary recovery. */ pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc, RSC_STOP, pe_order_optional); /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location * constraints can accomplish the same thing. But we support it, so here * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ } else if (rsc->container->is_remote_node) { remote_rsc = rsc->container; } else { remote_rsc = pe__resource_contains_guest_node(rsc->cluster, rsc->container); } if (remote_rsc != NULL) { /* Force the resource on the Pacemaker Remote node instead of * colocating the resource with the container resource. */ for (GList *item = allowed_nodes; item; item = item->next) { pe_node_t *node = item->data; if (node->details->remote_rsc != remote_rsc) { node->weight = -INFINITY; } } } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote * connection resource for a guest node or bundle. 
*/ int score; crm_trace("Order and colocate %s relative to its container %s", rsc->id, rsc->container->id); pcmk__new_ordering(rsc->container, pcmk__op_key(rsc->container->id, RSC_START, 0), NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL, pe_order_implies_then|pe_order_runnable_left, rsc->cluster); pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL, rsc->container, pcmk__op_key(rsc->container->id, RSC_STOP, 0), NULL, pe_order_implies_first, rsc->cluster); if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) { score = 10000; /* Highly preferred but not essential */ } else { score = INFINITY; /* Force them to run on the same host */ } pcmk__new_colocation("resource-with-container", NULL, score, rsc, rsc->container, NULL, NULL, true, rsc->cluster); } } if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) { /* Remote connections and fencing devices are not allowed to run on * Pacemaker Remote nodes */ rsc_avoids_remote_nodes(rsc); } g_list_free(allowed_nodes); } /*! * \internal * \brief Apply a colocation's score to node weights or resource priority * * Given a colocation constraint, apply its score to the dependent's * allowed node weights (if we are still placing resources) or priority (if * we are choosing promotable clone instance roles). * * \param[in,out] dependent Dependent resource in colocation * \param[in] primary Primary resource in colocation * \param[in] colocation Colocation constraint to apply * \param[in] for_dependent true if called on behalf of dependent */ void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent, const pe_resource_t *primary, const pcmk__colocation_t *colocation, bool for_dependent) { enum pcmk__coloc_affects filter_results; CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL), return); if (for_dependent) { // Always process on behalf of primary resource primary->cmds->apply_coloc_score(dependent, primary, colocation, false); return; } filter_results = pcmk__colocation_affects(dependent, primary, colocation, false); pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)", ((colocation->score > 0)? "Colocating" : "Anti-colocating"), dependent->id, primary->id, colocation->id, colocation->score, filter_results); switch (filter_results) { case pcmk__coloc_affects_role: pcmk__apply_coloc_to_priority(dependent, primary, colocation); break; case pcmk__coloc_affects_location: pcmk__apply_coloc_to_weights(dependent, primary, colocation); break; default: // pcmk__coloc_affects_nothing return; } } /*! * \internal * \brief Return action flags for a given primitive resource action * * \param[in,out] action Action to get flags for * \param[in] node If not NULL, limit effects to this node (ignored) * * \return Flags appropriate to \p action on \p node */ enum pe_action_flags pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node) { CRM_ASSERT(action != NULL); return action->flags; } /*! * \internal * \brief Check whether a node is a multiply active resource's expected node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p rsc is multiply active with multiple-active set to * stop_unexpected, and \p node is the node where it will remain active * \note This assumes that the resource's next role cannot be changed to stopped * after this is called, which should be reasonable if status has already * been unpacked and resources have been assigned to nodes. 
*/ static bool is_expected_node(const pe_resource_t *rsc, const pe_node_t *node) { return pcmk_all_flags_set(rsc->flags, pe_rsc_stop_unexpected|pe_rsc_restarting) && (rsc->next_role > RSC_ROLE_STOPPED) && pe__same_node(rsc->allocated_to, node); } /*! * \internal * \brief Schedule actions needed to stop a resource wherever it is active * * \param[in,out] rsc Resource being stopped * \param[in] node Node where resource is being stopped (ignored) * \param[in] optional Whether actions should be optional */ static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; pe_action_t *stop = NULL; if (is_expected_node(rsc, current)) { /* We are scheduling restart actions for a multiply active resource * with multiple-active=stop_unexpected, and this is where it should * not be stopped. */ pe_rsc_trace(rsc, "Skipping stop of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); continue; } if (rsc->partial_migration_target != NULL) { // Continue migration if node originally was and remains target if (pe__same_node(current, rsc->partial_migration_target) && pe__same_node(current, rsc->allocated_to)) { pe_rsc_trace(rsc, "Skipping stop of %s on %s " "because partial migration there will continue", rsc->id, pe__node_name(current)); continue; } else { pe_rsc_trace(rsc, "Forcing stop of %s on %s " "because migration target changed", rsc->id, pe__node_name(current)); optional = false; } } pe_rsc_trace(rsc, "Scheduling stop of %s on %s", rsc->id, pe__node_name(current)); stop = stop_action(rsc, current, optional); if (rsc->allocated_to == NULL) { pe_action_set_reason(stop, "node availability", true); } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting |pe_rsc_stop_unexpected)) { /* We are stopping a multiply active resource on a node that is * not its expected node, and we are still scheduling restart * actions, so the stop is for being multiply active. */ pe_action_set_reason(stop, "being multiply active", true); } if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe__clear_action_flags(stop, pe_action_runnable); } if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) { pcmk__schedule_cleanup(rsc, current, optional); } if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { pe_action_t *unfence = pe_fence_op(current, "on", true, NULL, false, rsc->cluster); order_actions(stop, unfence, pe_order_implies_first); if (!pcmk__node_unfenced(current)) { pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, pe__node_name(current)); } } } } /*! * \internal * \brief Schedule actions needed to start a resource on a node * * \param[in,out] rsc Resource being started - * \param[in] node Node where resource should be started + * \param[in,out] node Node where resource should be started * \param[in] optional Whether actions should be optional */ static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { pe_action_t *start = NULL; CRM_ASSERT(node != NULL); pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)", (optional? 
"optional" : "required"), rsc->id, pe__node_name(node), node->weight); start = start_action(rsc, node, TRUE); pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then); if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) { pe__clear_action_flags(start, pe_action_optional); } if (is_expected_node(rsc, node)) { /* This could be a problem if the start becomes necessary for other * reasons later. */ pe_rsc_trace(rsc, "Start of multiply active resouce %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(start, pe_action_pseudo); } } /*! * \internal * \brief Schedule actions needed to promote a resource on a node * * \param[in,out] rsc Resource being promoted * \param[in] node Node where resource should be promoted * \param[in] optional Whether actions should be optional */ static void promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { GList *iter = NULL; GList *action_list = NULL; bool runnable = true; CRM_ASSERT(node != NULL); // Any start must be runnable for promotion to be runnable action_list = pe__resource_actions(rsc, node, RSC_START, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *start = (pe_action_t *) iter->data; if (!pcmk_is_set(start->flags, pe_action_runnable)) { runnable = false; } } g_list_free(action_list); if (runnable) { pe_action_t *promote = promote_action(rsc, node, optional); pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(node)); if (is_expected_node(rsc, node)) { /* This could be a problem if the promote becomes necessary for * other reasons later. */ pe_rsc_trace(rsc, "Promotion of multiply active resouce %s " "on expected node %s will be a pseudo-action", rsc->id, pe__node_name(node)); pe__set_action_flags(promote, pe_action_pseudo); } } else { pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable", rsc->id, pe__node_name(node)); action_list = pe__resource_actions(rsc, node, RSC_PROMOTE, true); for (iter = action_list; iter != NULL; iter = iter->next) { pe_action_t *promote = (pe_action_t *) iter->data; pe__clear_action_flags(promote, pe_action_runnable); } g_list_free(action_list); } } /*! * \internal * \brief Schedule actions needed to demote a resource wherever it is active * * \param[in,out] rsc Resource being demoted * \param[in] node Node where resource should be demoted (ignored) * \param[in] optional Whether actions should be optional */ static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional) { /* Since this will only be called for a primitive (possibly as an instance * of a collective resource), the resource is multiply active if it is * running on more than one node, so we want to demote on all of them as * part of recovery, regardless of which one is the desired node. */ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current = (pe_node_t *) iter->data; if (is_expected_node(rsc, current)) { pe_rsc_trace(rsc, "Skipping demote of multiply active resource %s " "on expected node %s", rsc->id, pe__node_name(current)); } else { pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s", (optional? "optional" : "required"), rsc->id, pe__node_name(current)); demote_action(rsc, current, optional); } } } static void assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional) { CRM_ASSERT(false); } /*! 
* \internal * \brief Schedule cleanup of a resource * * \param[in,out] rsc Resource to clean up * \param[in] node Node to clean up on * \param[in] optional Whether clean-up should be optional */ void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional) { /* If the cleanup is required, its orderings are optional, because they're * relevant only if both actions are required. Conversely, if the cleanup is * optional, the orderings make the then action required if the first action * becomes required. */ uint32_t flag = optional? pe_order_implies_then : pe_order_optional; CRM_CHECK((rsc != NULL) && (node != NULL), return); if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed", rsc->id, pe__node_name(node)); return; } if (node->details->unclean || !node->details->online) { pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable", rsc->id, pe__node_name(node)); return; } crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node)); delete_action(rsc, node, optional); // stop -> clean-up -> start pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE, flag); pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START, flag); } /*! * \internal * \brief Add primitive meta-attributes relevant to graph actions to XML * * \param[in] rsc Primitive resource whose meta-attributes should be added * \param[in,out] xml Transition graph action attributes XML to add to */ void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml) { char *name = NULL; char *value = NULL; const pe_resource_t *parent = NULL; CRM_ASSERT((rsc != NULL) && (xml != NULL)); /* Clone instance numbers get set internally as meta-attributes, and are * needed in the transition graph (for example, to tell unique clone * instances apart). */ value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_INCARNATION); crm_xml_add(xml, name, value); free(name); } // Not sure if this one is really needed ... value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE); if (value != NULL) { name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE); crm_xml_add(xml, name, value); free(name); } /* The container meta-attribute can be set on the primitive itself or one of * its parents (for example, a group inside a container resource), so check * them all, and keep the highest one found. */ for (parent = rsc; parent != NULL; parent = parent->parent) { if (parent->container != NULL) { crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id); } } /* Bundle replica children will get their external-ip set internally as a * meta-attribute. The graph action needs it, but under a different naming * convention than other meta-attributes. */ value = g_hash_table_lookup(rsc->meta, "external-ip"); if (value != NULL) { crm_xml_add(xml, "pcmk_external_ip", value); } } // Primitive implementation of resource_alloc_functions_t:add_utilization() void pcmk__primitive_add_utilization(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization", orig_rsc->id, rsc->id); pcmk__release_node_capacity(utilization, rsc); } /*! 
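 * pcmk__primitive_add_graph_meta() above relies on crm_meta_name() to turn a
 * meta-attribute name into its graph-action key. A standalone approximation
 * of that conversion, assuming (as libcrmcommon does) a "CRM_meta_" prefix
 * with '-' mapped to '_'; demo_meta_name() is illustrative, not the real API.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
demo_meta_name(const char *field)
{
    size_t len = strlen("CRM_meta_") + strlen(field) + 1;
    char *name = malloc(len);

    if (name != NULL) {
        snprintf(name, len, "CRM_meta_%s", field);
        for (char *c = name; *c != '\0'; ++c) {
            if (*c == '-') {
                *c = '_';   // Attribute names use dashes, keys underscores
            }
        }
    }
    return name;
}

int
main(void)
{
    char *name = demo_meta_name("clone-max");

    if (name != NULL) {
        printf("%s\n", name);   // CRM_meta_clone_max
        free(name);
    }
    return 0;
}

/*!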
* \internal * \brief Get epoch time of node's shutdown attribute (or now if none) * - * \param[in] node Node to check - * \param[in] data_set Cluster working set + * \param[in,out] node Node to check * * \return Epoch time corresponding to shutdown attribute if set or now if not */ static time_t -shutdown_time(const pe_node_t *node) +shutdown_time(pe_node_t *node) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); time_t result = 0; if (shutdown != NULL) { long long result_ll; if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) { result = (time_t) result_ll; } } return (result == 0)? get_effective_time(node->details->data_set) : result; } /*! * \internal * \brief Ban a resource from a node if it's not locked to the node * - * \param[in] data Node to check - * \param[in] user_data Resource to check + * \param[in] data Node to check + * \param[in,out] user_data Resource to check */ static void ban_if_not_locked(gpointer data, gpointer user_data) { - pe_node_t *node = (pe_node_t *) data; + const pe_node_t *node = (const pe_node_t *) data; pe_resource_t *rsc = (pe_resource_t *) user_data; if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) { resource_location(rsc, node, -CRM_SCORE_INFINITY, XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster); } } // Primitive implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__primitive_shutdown_lock(pe_resource_t *rsc) { const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS); // Fence devices and remote connections can't be locked if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches) || pe__resource_is_remote_conn(rsc, rsc->cluster)) { return; } if (rsc->lock_node != NULL) { // The lock was obtained from resource history if (rsc->running_on != NULL) { /* The resource was started elsewhere even though it is now * considered locked. This shouldn't be possible, but as a * failsafe, we don't want to disturb the resource now. */ pe_rsc_info(rsc, "Cancelling shutdown lock because %s is already active", rsc->id); pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster); rsc->lock_node = NULL; rsc->lock_time = 0; } // Only a resource active on exactly one node can be locked } else if (pcmk__list_of_1(rsc->running_on)) { pe_node_t *node = rsc->running_on->data; if (node->details->shutdown) { if (node->details->unclean) { pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown", rsc->id, pe__node_name(node)); } else { rsc->lock_node = node; rsc->lock_time = shutdown_time(node); } } } if (rsc->lock_node == NULL) { // No lock needed return; } if (rsc->cluster->shutdown_lock > 0) { time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock; pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)", rsc->id, pe__node_name(rsc->lock_node), (long long) lock_expiration); pe__update_recheck_time(++lock_expiration, rsc->cluster); } else { pe_rsc_info(rsc, "Locking %s to %s due to shutdown", rsc->id, pe__node_name(rsc->lock_node)); } // If resource is locked to one node, ban it from all other nodes g_list_foreach(rsc->cluster->nodes, ban_if_not_locked, rsc); } diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c index 7c06ba9139..dbd8805fe6 100644 --- a/lib/pacemaker/pcmk_sched_promotable.c +++ b/lib/pacemaker/pcmk_sched_promotable.c @@ -1,1265 +1,1268 @@ /* * Copyright 2004-2023 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include "libpacemaker_private.h" /*! * \internal * \brief Add implicit promotion ordering for a promotable instance * - * \param[in] clone Clone resource - * \param[in] child Instance of \p clone being ordered - * \param[in] last Previous instance ordered (NULL if \p child is first) + * \param[in,out] clone Clone resource + * \param[in,out] child Instance of \p clone being ordered + * \param[in,out] last Previous instance ordered (NULL if \p child is first) */ static void order_instance_promotion(pe_resource_t *clone, pe_resource_t *child, pe_resource_t *last) { // "Promote clone" -> promote instance -> "clone promoted" pcmk__order_resource_actions(clone, RSC_PROMOTE, child, RSC_PROMOTE, pe_order_optional); pcmk__order_resource_actions(child, RSC_PROMOTE, clone, RSC_PROMOTED, pe_order_optional); // If clone is ordered, order this instance relative to last if ((last != NULL) && pe__clone_is_ordered(clone)) { pcmk__order_resource_actions(last, RSC_PROMOTE, child, RSC_PROMOTE, pe_order_optional); } } /*! * \internal * \brief Add implicit demotion ordering for a promotable instance * - * \param[in] clone Clone resource - * \param[in] child Instance of \p clone being ordered - * \param[in] last Previous instance ordered (NULL if \p child is first) + * \param[in,out] clone Clone resource + * \param[in,out] child Instance of \p clone being ordered + * \param[in] last Previous instance ordered (NULL if \p child is first) */ static void order_instance_demotion(pe_resource_t *clone, pe_resource_t *child, pe_resource_t *last) { // "Demote clone" -> demote instance -> "clone demoted" pcmk__order_resource_actions(clone, RSC_DEMOTE, child, RSC_DEMOTE, pe_order_implies_first_printed); pcmk__order_resource_actions(child, RSC_DEMOTE, clone, RSC_DEMOTED, pe_order_implies_then_printed); // If clone is ordered, order this instance relative to last if ((last != NULL) && pe__clone_is_ordered(clone)) { pcmk__order_resource_actions(child, RSC_DEMOTE, last, RSC_DEMOTE, pe_order_optional); } } /*! 
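 * Both ordering helpers above additionally chain each instance to the
 * previously ordered one when the clone is ordered. The caller-side shape
 * (as used by pcmk__order_promotable_instances() later in this patch) is a
 * simple "previous pointer" walk; a standalone sketch with demo data:
 */

#include <stdio.h>

int
main(void)
{
    const char *instances[] = { "rsc:0", "rsc:1", "rsc:2" };
    const char *previous = NULL;

    for (int i = 0; i < 3; ++i) {
        if (previous != NULL) {
            // Ordered clones promote strictly one instance at a time
            printf("order: promote %s before promote %s\n",
                   previous, instances[i]);
        }
        previous = instances[i];
    }
    return 0;
}

/*!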
* \internal * \brief Check whether an instance will be promoted or demoted * - * \param[in] rsc Instance to check - * \param[in] demoting If \p rsc will be demoted, this will be set to true - * \param[in] promoting If \p rsc will be promoted, this will be set to true + * \param[in] rsc Instance to check + * \param[out] demoting If \p rsc will be demoted, this will be set to true + * \param[out] promoting If \p rsc will be promoted, this will be set to true */ static void -check_for_role_change(pe_resource_t *rsc, bool *demoting, bool *promoting) +check_for_role_change(const pe_resource_t *rsc, bool *demoting, bool *promoting) { - GList *iter = NULL; + const GList *iter = NULL; // If this is a cloned group, check group members recursively if (rsc->children != NULL) { for (iter = rsc->children; iter != NULL; iter = iter->next) { - check_for_role_change((pe_resource_t *) iter->data, + check_for_role_change((const pe_resource_t *) iter->data, demoting, promoting); } return; } for (iter = rsc->actions; iter != NULL; iter = iter->next) { - pe_action_t *action = (pe_action_t *) iter->data; + const pe_action_t *action = (const pe_action_t *) iter->data; if (*promoting && *demoting) { return; } else if (pcmk_is_set(action->flags, pe_action_optional)) { continue; } else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_none)) { *demoting = true; } else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_none)) { *promoting = true; } } } /*! * \internal * \brief Add promoted-role location constraint scores to an instance's priority * * Adjust a promotable clone instance's promotion priority by the scores of any * location constraints in a list that are both limited to the promoted role and * for the node where the instance will be placed. * - * \param[in] child Promotable clone instance - * \param[in] location_constraints List of location constraints to apply - * \param[in] chosen Node where \p child will be placed + * \param[in,out] child Promotable clone instance + * \param[in] location_constraints List of location constraints to apply + * \param[in] chosen Node where \p child will be placed */ static void -apply_promoted_locations(pe_resource_t *child, GList *location_constraints, - pe_node_t *chosen) +apply_promoted_locations(pe_resource_t *child, + const GList *location_constraints, + const pe_node_t *chosen) { - for (GList *iter = location_constraints; iter; iter = iter->next) { - pe__location_t *location = iter->data; + for (const GList *iter = location_constraints; iter; iter = iter->next) { + const pe__location_t *location = iter->data; pe_node_t *weighted_node = NULL; if (location->role_filter == RSC_ROLE_PROMOTED) { weighted_node = pe_find_node_id(location->node_list_rh, chosen->details->id); } if (weighted_node != NULL) { int new_priority = pcmk__add_scores(child->priority, weighted_node->weight); pe_rsc_trace(child, "Applying location %s to %s promotion priority on %s: " "%d + %d = %d", location->id, child->id, pe__node_name(weighted_node), child->priority, weighted_node->weight, new_priority); child->priority = new_priority; } } } /*! 
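 * apply_promoted_locations() above combines scores via pcmk__add_scores(),
 * which saturates instead of overflowing because Pacemaker scores are capped
 * at +/-INFINITY (1000000 in the C API). A minimal standalone sketch of that
 * convention, where minus-infinity wins; not the real function:
 */

#include <stdio.h>

#define DEMO_SCORE_INFINITY 1000000

static int
demo_add_scores(int a, int b)
{
    // -INFINITY plus anything (even +INFINITY) stays -INFINITY
    if ((a <= -DEMO_SCORE_INFINITY) || (b <= -DEMO_SCORE_INFINITY)) {
        return -DEMO_SCORE_INFINITY;
    }
    if ((a >= DEMO_SCORE_INFINITY) || (b >= DEMO_SCORE_INFINITY)) {
        return DEMO_SCORE_INFINITY;
    }
    int sum = a + b;

    if (sum > DEMO_SCORE_INFINITY) {
        return DEMO_SCORE_INFINITY;
    }
    if (sum < -DEMO_SCORE_INFINITY) {
        return -DEMO_SCORE_INFINITY;
    }
    return sum;
}

int
main(void)
{
    printf("%d\n", demo_add_scores(999999, 500));   // clamps to 1000000
    return 0;
}

/*!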
* \internal * \brief Get the node that an instance will be promoted on * * \param[in] rsc Promotable clone instance to check * * \return Node that \p rsc will be promoted on, or NULL if none */ static pe_node_t * -node_to_be_promoted_on(pe_resource_t *rsc) +node_to_be_promoted_on(const pe_resource_t *rsc) { pe_node_t *node = NULL; pe_node_t *local_node = NULL; const pe_resource_t *parent = NULL; // If this is a cloned group, bail if any group member can't be promoted for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; if (node_to_be_promoted_on(child) == NULL) { pe_rsc_trace(rsc, "%s can't be promoted because member %s can't", rsc->id, child->id); return NULL; } } node = rsc->fns->location(rsc, NULL, FALSE); if (node == NULL) { pe_rsc_trace(rsc, "%s can't be promoted because it won't be active", rsc->id); return NULL; } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED) { crm_notice("Unmanaged instance %s will be left promoted on %s", rsc->id, pe__node_name(node)); } else { pe_rsc_trace(rsc, "%s can't be promoted because it is unmanaged", rsc->id); return NULL; } } else if (rsc->priority < 0) { pe_rsc_trace(rsc, "%s can't be promoted because its promotion priority %d " "is negative", rsc->id, rsc->priority); return NULL; } else if (!pcmk__node_available(node, false, true)) { pe_rsc_trace(rsc, "%s can't be promoted because %s can't run resources", rsc->id, pe__node_name(node)); return NULL; } parent = pe__const_top_resource(rsc, false); local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id); if (local_node == NULL) { /* It should not be possible for the scheduler to have allocated the * instance to a node where its parent is not allowed, but it's good to * have a fail-safe. */ if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { crm_warn("%s can't be promoted because %s is not allowed on %s " "(scheduler bug?)", rsc->id, parent->id, pe__node_name(node)); } // else the instance is unmanaged and already promoted return NULL; } else if ((local_node->count >= pe__clone_promoted_node_max(parent)) && pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "%s can't be promoted because %s has " "maximum promoted instances already", rsc->id, pe__node_name(node)); return NULL; } return local_node; } /*! 
* \internal * \brief Compare two promotable clone instances by promotion priority * * \param[in] a First instance to compare * \param[in] b Second instance to compare * * \return A negative number if \p a has higher promotion priority, * a positive number if \p b has higher promotion priority, * or 0 if promotion priorities are equal */ static gint cmp_promotable_instance(gconstpointer a, gconstpointer b) { const pe_resource_t *rsc1 = (const pe_resource_t *) a; const pe_resource_t *rsc2 = (const pe_resource_t *) b; enum rsc_role_e role1 = RSC_ROLE_UNKNOWN; enum rsc_role_e role2 = RSC_ROLE_UNKNOWN; CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL)); // Check sort index set by pcmk__set_instance_roles() if (rsc1->sort_index > rsc2->sort_index) { pe_rsc_trace(rsc1, "%s has higher promotion priority than %s " "(sort index %d > %d)", rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index); return -1; } else if (rsc1->sort_index < rsc2->sort_index) { pe_rsc_trace(rsc1, "%s has lower promotion priority than %s " "(sort index %d < %d)", rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index); return 1; } // If those are the same, prefer instance whose current role is higher role1 = rsc1->fns->state(rsc1, TRUE); role2 = rsc2->fns->state(rsc2, TRUE); if (role1 > role2) { pe_rsc_trace(rsc1, "%s has higher promotion priority than %s " "(higher current role)", rsc1->id, rsc2->id); return -1; } else if (role1 < role2) { pe_rsc_trace(rsc1, "%s has lower promotion priority than %s " "(lower current role)", rsc1->id, rsc2->id); return 1; } // Finally, do normal clone instance sorting return pcmk__cmp_instance(a, b); } /*! * \internal * \brief Add a promotable clone instance's sort index to its node's weight * * Add a promotable clone instance's sort index (which sums its promotion * preferences and scores of relevant location constraints for the promoted * role) to the node weight of the instance's allocated node. * - * \param[in] data Promotable clone instance - * \param[in] user_data Clone parent of \p data + * \param[in] data Promotable clone instance + * \param[in,out] user_data Clone parent of \p data */ static void add_sort_index_to_node_weight(gpointer data, gpointer user_data) { - pe_resource_t *child = (pe_resource_t *) data; + const pe_resource_t *child = (const pe_resource_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; pe_node_t *node = NULL; - pe_node_t *chosen = NULL; + const pe_node_t *chosen = NULL; if (child->sort_index < 0) { pe_rsc_trace(clone, "Not adding sort index of %s: negative", child->id); return; } chosen = child->fns->location(child, NULL, FALSE); if (chosen == NULL) { pe_rsc_trace(clone, "Not adding sort index of %s: inactive", child->id); return; } node = (pe_node_t *) pe_hash_table_lookup(clone->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); pe_rsc_trace(clone, "Adding sort index %s of %s to weight for %s", pcmk_readable_score(child->sort_index), child->id, pe__node_name(node)); node->weight = pcmk__add_scores(child->sort_index, node->weight); } /*! 
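 * cmp_promotable_instance() above is an ordinary GCompareFunc: a negative
 * return means the first argument sorts earlier. A self-contained GLib
 * sketch sorting a list by descending integer priority the same way
 * (demo_instance_t is a made-up type; build with pkg-config glib-2.0):
 */

#include <glib.h>
#include <stdio.h>

typedef struct {
    const char *id;
    int priority;
} demo_instance_t;

static gint
demo_cmp(gconstpointer a, gconstpointer b)
{
    const demo_instance_t *i1 = a;
    const demo_instance_t *i2 = b;

    if (i1->priority > i2->priority) {
        return -1;  // Higher priority sorts first
    }
    if (i1->priority < i2->priority) {
        return 1;
    }
    return 0;
}

int
main(void)
{
    demo_instance_t r0 = { "rsc:0", 5 };
    demo_instance_t r1 = { "rsc:1", 99 };
    GList *list = g_list_prepend(g_list_prepend(NULL, &r0), &r1);

    list = g_list_sort(list, demo_cmp);
    for (GList *iter = list; iter != NULL; iter = iter->next) {
        printf("%s\n", ((demo_instance_t *) iter->data)->id);  // rsc:1 first
    }
    g_list_free(list);
    return 0;
}

/*!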
* \internal * \brief Apply colocation to dependent's node weights if for promoted role * - * \param[in] data Colocation constraint to apply - * \param[in] user_data Promotable clone that is constraint's dependent + * \param[in,out] data Colocation constraint to apply + * \param[in,out] user_data Promotable clone that is constraint's dependent */ static void apply_coloc_to_dependent(gpointer data, gpointer user_data) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; pe_resource_t *primary = constraint->primary; uint32_t flags = pcmk__coloc_select_default; float factor = constraint->score / (float) INFINITY; if (constraint->dependent_role != RSC_ROLE_PROMOTED) { return; } if (constraint->score < INFINITY) { flags = pcmk__coloc_select_active; } pe_rsc_trace(clone, "Applying colocation %s (promoted %s with %s) @%s", constraint->id, constraint->dependent->id, constraint->primary->id, pcmk_readable_score(constraint->score)); pcmk__add_colocated_node_scores(primary, clone->id, &clone->allowed_nodes, constraint->node_attribute, factor, flags); } /*! * \internal * \brief Apply colocation to primary's node weights if for promoted role * - * \param[in] data Colocation constraint to apply - * \param[in] user_data Promotable clone that is constraint's primary + * \param[in,out] data Colocation constraint to apply + * \param[in,out] user_data Promotable clone that is constraint's primary */ static void apply_coloc_to_primary(gpointer data, gpointer user_data) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; pe_resource_t *dependent = constraint->dependent; const float factor = constraint->score / (float) INFINITY; const uint32_t flags = pcmk__coloc_select_active |pcmk__coloc_select_nonnegative; if ((constraint->primary_role != RSC_ROLE_PROMOTED) || !pcmk__colocation_has_influence(constraint, NULL)) { return; } pe_rsc_trace(clone, "Applying colocation %s (%s with promoted %s) @%s", constraint->id, constraint->dependent->id, constraint->primary->id, pcmk_readable_score(constraint->score)); pcmk__add_colocated_node_scores(dependent, clone->id, &clone->allowed_nodes, constraint->node_attribute, factor, flags); } /*! 
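 * Both colocation appliers above weight the constraint's influence by
 * score/INFINITY, so a mandatory constraint contributes with factor 1.0 and
 * a soft score of, say, 5000 contributes at 0.005. A standalone illustration
 * of that weighting (demo values only):
 */

#include <stdio.h>

#define DEMO_SCORE_INFINITY 1000000

int
main(void)
{
    int scores[] = { DEMO_SCORE_INFINITY, 5000, -300 };

    for (int i = 0; i < 3; ++i) {
        // The factor later scales the other resource's node weights
        float factor = scores[i] / (float) DEMO_SCORE_INFINITY;

        printf("score %8d -> factor %f\n", scores[i], factor);
    }
    return 0;
}

/*!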
* \internal * \brief Set clone instance's sort index to its node's weight * - * \param[in] data Promotable clone instance - * \param[in] user_data Parent clone of \p data + * \param[in,out] data Promotable clone instance + * \param[in] user_data Parent clone of \p data */ static void set_sort_index_to_node_weight(gpointer data, gpointer user_data) { pe_resource_t *child = (pe_resource_t *) data; - pe_resource_t *clone = (pe_resource_t *) user_data; + const pe_resource_t *clone = (const pe_resource_t *) user_data; pe_node_t *chosen = child->fns->location(child, NULL, FALSE); if (!pcmk_is_set(child->flags, pe_rsc_managed) && (child->next_role == RSC_ROLE_PROMOTED)) { child->sort_index = INFINITY; pe_rsc_trace(clone, "Final sort index for %s is INFINITY (unmanaged promoted)", child->id); } else if ((chosen == NULL) || (child->sort_index < 0)) { pe_rsc_trace(clone, "Final sort index for %s is %d (ignoring node weight)", child->id, child->sort_index); } else { - pe_node_t *node = NULL; + const pe_node_t *node = NULL; - node = (pe_node_t *) pe_hash_table_lookup(clone->allowed_nodes, - chosen->details->id); + node = pe_hash_table_lookup(clone->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); child->sort_index = node->weight; pe_rsc_trace(clone, "Merging weights for %s: final sort index for %s is %d", clone->id, child->id, child->sort_index); } } /*! * \internal * \brief Sort a promotable clone's instances by descending promotion priority * - * \param[in] clone Promotable clone to sort + * \param[in,out] clone Promotable clone to sort */ static void sort_promotable_instances(pe_resource_t *clone) { if (pe__set_clone_flag(clone, pe__clone_promotion_constrained) == pcmk_rc_already) { return; } pe__set_resource_flags(clone, pe_rsc_merging); for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; pe_rsc_trace(clone, "Merging weights for %s: initial sort index for %s is %d", clone->id, child->id, child->sort_index); } pe__show_node_weights(true, clone, "Before", clone->allowed_nodes, clone->cluster); g_list_foreach(clone->children, add_sort_index_to_node_weight, clone); g_list_foreach(clone->rsc_cons, apply_coloc_to_dependent, clone); g_list_foreach(clone->rsc_cons_lhs, apply_coloc_to_primary, clone); // Ban resource from all nodes if it needs a ticket but doesn't have it pcmk__require_promotion_tickets(clone); pe__show_node_weights(true, clone, "After", clone->allowed_nodes, clone->cluster); // Reset sort indexes to final node weights g_list_foreach(clone->children, set_sort_index_to_node_weight, clone); // Finally, sort instances in descending order of promotion priority clone->children = g_list_sort(clone->children, cmp_promotable_instance); pe__clear_resource_flags(clone, pe_rsc_merging); } /*! 
* \internal * \brief Find the active instance (if any) of an anonymous clone on a node * * \param[in] clone Anonymous clone to check * \param[in] id Instance ID (without instance number) to check * \param[in] node Node to check * * \return Instance of \p clone that is active on \p node (ignoring instance * numbers) if any, otherwise NULL */ static pe_resource_t * -find_active_anon_instance(pe_resource_t *clone, const char *id, +find_active_anon_instance(const pe_resource_t *clone, const char *id, const pe_node_t *node) { for (GList *iter = clone->children; iter; iter = iter->next) { pe_resource_t *child = iter->data; pe_resource_t *active = NULL; // Use ->find_rsc() in case this is a cloned group active = clone->fns->find_rsc(child, id, node, pe_find_clone|pe_find_current); if (active != NULL) { return active; } } return NULL; } /*! * \internal * \brief Check whether an anonymous clone instance is known on a node * * \param[in] clone Anonymous clone to check * \param[in] id Instance ID (without instance number) to check * \param[in] node Node to check * * \return true if \p id instance of \p clone is known on \p node, * otherwise false */ static bool anonymous_known_on(const pe_resource_t *clone, const char *id, const pe_node_t *node) { for (GList *iter = clone->children; iter; iter = iter->next) { pe_resource_t *child = iter->data; /* Use ->find_rsc() because this might be a cloned group, and knowing * that other members of the group are known here implies nothing. */ child = clone->fns->find_rsc(child, id, NULL, pe_find_clone); CRM_LOG_ASSERT(child != NULL); if (child != NULL) { if (g_hash_table_lookup(child->known_on, node->details->id)) { return true; } } } return false; } /*! * \internal * \brief Check whether a node is allowed to run a resource * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p node is allowed to run \p rsc, otherwise false */ static bool is_allowed(const pe_resource_t *rsc, const pe_node_t *node) { pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); return (allowed != NULL) && (allowed->weight >= 0); } /*! * \internal * \brief Check whether a clone instance's promotion score should be considered * * \param[in] rsc Promotable clone instance to check * \param[in] node Node where score would be applied * * \return true if \p rsc's promotion score should be considered on \p node, * otherwise false */ static bool -promotion_score_applies(pe_resource_t *rsc, const pe_node_t *node) +promotion_score_applies(const pe_resource_t *rsc, const pe_node_t *node) { char *id = clone_strip(rsc->id); - pe_resource_t *parent = uber_parent(rsc); + const pe_resource_t *parent = pe__const_top_resource(rsc, false); pe_resource_t *active = NULL; const char *reason = "allowed"; // Some checks apply only to anonymous clone instances if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { // If instance is active on the node, its score definitely applies active = find_active_anon_instance(parent, id, node); if (active == rsc) { reason = "active"; goto check_allowed; } /* If *no* instance is active on this node, this instance's score will * count if it has been probed on this node. */ if ((active == NULL) && anonymous_known_on(parent, id, node)) { reason = "probed"; goto check_allowed; } } /* If this clone's status is unknown on *all* nodes (e.g. cluster startup), * take all instances' scores into account, to make sure we use any * permanent promotion scores.
*/ if ((rsc->running_on == NULL) && (g_hash_table_size(rsc->known_on) == 0)) { reason = "none probed"; goto check_allowed; } /* Otherwise, we've probed and/or started the resource *somewhere*, so * consider promotion scores on nodes where we know the status. */ if ((pe_hash_table_lookup(rsc->known_on, node->details->id) != NULL) || (pe_find_node_id(rsc->running_on, node->details->id) != NULL)) { reason = "known"; } else { pe_rsc_trace(rsc, "Ignoring %s promotion score (for %s) on %s: not probed", rsc->id, id, pe__node_name(node)); free(id); return false; } check_allowed: if (is_allowed(rsc, node)) { pe_rsc_trace(rsc, "Counting %s promotion score (for %s) on %s: %s", rsc->id, id, pe__node_name(node), reason); free(id); return true; } pe_rsc_trace(rsc, "Ignoring %s promotion score (for %s) on %s: not allowed", rsc->id, id, pe__node_name(node)); free(id); return false; } /*! * \internal * \brief Get the value of a promotion score node attribute * * \param[in] rsc Promotable clone instance to get promotion score for * \param[in] node Node to get promotion score for * \param[in] name Resource name to use in promotion score attribute name * * \return Value of promotion score node attribute for \p rsc on \p node */ static const char * -promotion_attr_value(pe_resource_t *rsc, const pe_node_t *node, +promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node, const char *name) { char *attr_name = NULL; const char *attr_value = NULL; CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL); attr_name = pcmk_promotion_score_name(name); attr_value = pe_node_attribute_calculated(node, attr_name, rsc); free(attr_name); return attr_value; } /*! * \internal * \brief Get the promotion score for a clone instance on a node * * \param[in] rsc Promotable clone instance to get score for * \param[in] node Node to get score for * \param[out] is_default If non-NULL, will be set true if no score available * * \return Promotion score for \p rsc on \p node (or 0 if none) */ static int -promotion_score(pe_resource_t *rsc, const pe_node_t *node, bool *is_default) +promotion_score(const pe_resource_t *rsc, const pe_node_t *node, + bool *is_default) { char *name = NULL; const char *attr_value = NULL; if (is_default != NULL) { *is_default = true; } CRM_CHECK((rsc != NULL) && (node != NULL), return 0); /* If this is an instance of a cloned group, the promotion score is the sum * of all members' promotion scores. */ if (rsc->children != NULL) { int score = 0; - for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { - pe_resource_t *child = (pe_resource_t *) iter->data; + for (const GList *iter = rsc->children; + iter != NULL; iter = iter->next) { + + const pe_resource_t *child = (const pe_resource_t *) iter->data; bool child_default = false; int child_score = promotion_score(child, node, &child_default); if (!child_default && (is_default != NULL)) { *is_default = false; } score += child_score; } return score; } if (!promotion_score_applies(rsc, node)) { return 0; } /* For the promotion score attribute name, use the name the resource is * known as in resource history, since that's what crm_attribute --promotion * would have used. */ name = (rsc->clone_name == NULL)? 
rsc->id : rsc->clone_name; attr_value = promotion_attr_value(rsc, node, name); if (attr_value != NULL) { pe_rsc_trace(rsc, "Promotion score for %s on %s = %s", name, pe__node_name(node), pcmk__s(attr_value, "(unset)")); } else if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { /* If we don't have any resource history yet, we won't have clone_name. * In that case, for anonymous clones, try the resource name without * any instance number. */ name = clone_strip(rsc->id); if (strcmp(rsc->id, name) != 0) { attr_value = promotion_attr_value(rsc, node, name); pe_rsc_trace(rsc, "Promotion score for %s on %s (for %s) = %s", name, pe__node_name(node), rsc->id, pcmk__s(attr_value, "(unset)")); } free(name); } if (attr_value == NULL) { return 0; } if (is_default != NULL) { *is_default = false; } return char2score(attr_value); } /*! * \internal * \brief Include promotion scores in instances' node weights and priorities * - * \param[in] rsc Promotable clone resource to update + * \param[in,out] rsc Promotable clone resource to update */ void pcmk__add_promotion_scores(pe_resource_t *rsc) { if (pe__set_clone_flag(rsc, pe__clone_promotion_added) == pcmk_rc_already) { return; } for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child_rsc = (pe_resource_t *) iter->data; GHashTableIter iter; pe_node_t *node = NULL; int score, new_score; g_hash_table_iter_init(&iter, child_rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (!pcmk__node_available(node, false, false)) { /* This node will never be promoted, so don't apply the * promotion score, as that may lead to clone shuffling. */ continue; } score = promotion_score(child_rsc, node, NULL); if (score > 0) { new_score = pcmk__add_scores(node->weight, score); if (new_score != node->weight) { pe_rsc_trace(rsc, "Adding promotion score to preference " "for %s on %s (%d->%d)", child_rsc->id, pe__node_name(node), node->weight, new_score); node->weight = new_score; } } if (score > child_rsc->priority) { pe_rsc_trace(rsc, "Updating %s priority to promotion score (%d->%d)", child_rsc->id, child_rsc->priority, score); child_rsc->priority = score; } } } } /*! * \internal * \brief If a resource's current role is started, change it to unpromoted * - * \param[in] data Resource to update - * \param[in] user_data Ignored + * \param[in,out] data Resource to update + * \param[in] user_data Ignored */ static void set_current_role_unpromoted(void *data, void *user_data) { pe_resource_t *rsc = (pe_resource_t *) data; if (rsc->role == RSC_ROLE_STARTED) { // Promotable clones should use unpromoted role instead of started rsc->role = RSC_ROLE_UNPROMOTED; } g_list_foreach(rsc->children, set_current_role_unpromoted, NULL); } /*! * \internal * \brief Set a resource's next role to unpromoted (or stopped if unassigned) * - * \param[in] data Resource to update - * \param[in] user_data Ignored + * \param[in,out] data Resource to update + * \param[in] user_data Ignored */ static void set_next_role_unpromoted(void *data, void *user_data) { pe_resource_t *rsc = (pe_resource_t *) data; GList *assigned = NULL; rsc->fns->location(rsc, &assigned, FALSE); if (assigned == NULL) { pe__set_next_role(rsc, RSC_ROLE_STOPPED, "stopped instance"); } else { pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED, "unpromoted instance"); g_list_free(assigned); } g_list_foreach(rsc->children, set_next_role_unpromoted, NULL); } /*! 
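 * promotion_score() above falls back from the history name (clone_name,
 * such as "ip:0") to the ID with the instance number stripped ("ip"), and
 * promotion_attr_value() derives the node attribute from that name. A
 * standalone sketch of the strip-and-prefix idea, assuming the historical
 * "master-" prefix; demo_clone_strip() is illustrative, not clone_strip():
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
demo_clone_strip(const char *id)
{
    const char *colon = strrchr(id, ':');   // Instance number follows ':'
    size_t len = (colon != NULL)? (size_t) (colon - id) : strlen(id);
    char *base = malloc(len + 1);

    if (base != NULL) {
        memcpy(base, id, len);
        base[len] = '\0';
    }
    return base;
}

int
main(void)
{
    char *base = demo_clone_strip("ip:0");

    if (base != NULL) {
        printf("lookup node attribute: master-%s\n", base);  // master-ip
        free(base);
    }
    return 0;
}

/*!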
* \internal * \brief Set a resource's next role to promoted if not already set * - * \param[in] data Resource to update - * \param[in] user_data Ignored + * \param[in,out] data Resource to update + * \param[in] user_data Ignored */ static void set_next_role_promoted(void *data, gpointer user_data) { pe_resource_t *rsc = (pe_resource_t *) data; if (rsc->next_role == RSC_ROLE_UNKNOWN) { pe__set_next_role(rsc, RSC_ROLE_PROMOTED, "promoted instance"); } g_list_foreach(rsc->children, set_next_role_promoted, NULL); } /*! * \internal * \brief Show instance's promotion score on node where it will be active * - * \param[in] instance Promotable clone instance to show + * \param[in,out] instance Promotable clone instance to show */ static void show_promotion_score(pe_resource_t *instance) { pe_node_t *chosen = instance->fns->location(instance, NULL, FALSE); if (pcmk_is_set(instance->cluster->flags, pe_flag_show_scores) && !pcmk__is_daemon && (instance->cluster->priv != NULL)) { pcmk__output_t *out = instance->cluster->priv; out->message(out, "promotion-score", instance, chosen, pcmk_readable_score(instance->sort_index)); } else { pe_rsc_debug(pe__const_top_resource(instance, false), "%s promotion score on %s: sort=%s priority=%s", instance->id, ((chosen == NULL)? "none" : pe__node_name(chosen)), pcmk_readable_score(instance->sort_index), pcmk_readable_score(instance->priority)); } } /*! * \internal * \brief Set a clone instance's promotion priority * - * \param[in] data Promotable clone instance to update - * \param[in] user_data Instance's parent clone + * \param[in,out] data Promotable clone instance to update + * \param[in] user_data Instance's parent clone */ static void set_instance_priority(gpointer data, gpointer user_data) { pe_resource_t *instance = (pe_resource_t *) data; - pe_resource_t *clone = (pe_resource_t *) user_data; - pe_node_t *chosen = NULL; + const pe_resource_t *clone = (const pe_resource_t *) user_data; + const pe_node_t *chosen = NULL; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; GList *list = NULL; pe_rsc_trace(clone, "Assigning priority for %s: %s", instance->id, role2text(instance->next_role)); if (instance->fns->state(instance, TRUE) == RSC_ROLE_STARTED) { set_current_role_unpromoted(instance, NULL); } // Only an instance that will be active can be promoted chosen = instance->fns->location(instance, &list, FALSE); if (pcmk__list_of_multiple(list)) { pcmk__config_err("Cannot promote non-colocated child %s", instance->id); } g_list_free(list); if (chosen == NULL) { return; } next_role = instance->fns->state(instance, FALSE); switch (next_role) { case RSC_ROLE_STARTED: case RSC_ROLE_UNKNOWN: // Set instance priority to its promotion score (or -1 if none) { bool is_default = false; instance->priority = promotion_score(instance, chosen, &is_default); if (is_default) { /* * Default to -1 if no value is set. 
This allows * instances eligible for promotion to be specified * based solely on rsc_location constraints, but * prevents any instance from being promoted if neither * a constraint nor a promotion score is present */ instance->priority = -1; } } break; case RSC_ROLE_UNPROMOTED: case RSC_ROLE_STOPPED: // Instance can't be promoted instance->priority = -INFINITY; break; case RSC_ROLE_PROMOTED: // Nothing needed (re-creating actions after scheduling fencing) break; default: CRM_CHECK(FALSE, crm_err("Unknown resource role %d for %s", next_role, instance->id)); } // Add relevant location constraint scores for promoted role apply_promoted_locations(instance, instance->rsc_location, chosen); apply_promoted_locations(instance, clone->rsc_location, chosen); // Apply relevant colocations with promoted role for (GList *iter = instance->rsc_cons; iter != NULL; iter = iter->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) iter->data; instance->cmds->apply_coloc_score(instance, cons->primary, cons, true); } instance->sort_index = instance->priority; if (next_role == RSC_ROLE_PROMOTED) { instance->sort_index = INFINITY; } pe_rsc_trace(clone, "Assigning %s priority = %d", instance->id, instance->priority); } /*! * \internal * \brief Set a promotable clone instance's role * - * \param[in] data Promotable clone instance to update - * \param[in] user_data Pointer to count of instances chosen for promotion + * \param[in,out] data Promotable clone instance to update + * \param[in,out] user_data Pointer to count of instances chosen for promotion */ static void set_instance_role(gpointer data, gpointer user_data) { pe_resource_t *instance = (pe_resource_t *) data; int *count = (int *) user_data; const pe_resource_t *clone = pe__const_top_resource(instance, false); pe_node_t *chosen = NULL; show_promotion_score(instance); if (instance->sort_index < 0) { pe_rsc_trace(clone, "Not supposed to promote instance %s", instance->id); } else if ((*count < pe__clone_promoted_max(instance)) || !pcmk_is_set(clone->flags, pe_rsc_managed)) { chosen = node_to_be_promoted_on(instance); } if (chosen == NULL) { set_next_role_unpromoted(instance, NULL); return; } if ((instance->role < RSC_ROLE_PROMOTED) && !pcmk_is_set(instance->cluster->flags, pe_flag_have_quorum) && (instance->cluster->no_quorum_policy == no_quorum_freeze)) { crm_notice("Clone instance %s cannot be promoted without quorum", instance->id); set_next_role_unpromoted(instance, NULL); return; } chosen->count++; pe_rsc_info(clone, "Choosing %s (%s) on %s for promotion", instance->id, role2text(instance->role), pe__node_name(chosen)); set_next_role_promoted(instance, NULL); (*count)++; } /*! 
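 * Because set_instance_priority() ran first and the instance list was sorted
 * by descending promotion priority, set_instance_role() effectively promotes
 * "the first N eligible" instances. A standalone sketch of that capped walk
 * with demo data (cap of 1, like promoted-max=1):
 */

#include <stdio.h>

int
main(void)
{
    const char *sorted[] = { "rsc:1", "rsc:0", "rsc:2" };   // Best first
    int eligible[] = { 1, 1, 0 };   // For example, negative sort index => 0
    int promoted_max = 1;
    int promoted = 0;

    for (int i = 0; i < 3; ++i) {
        if ((promoted < promoted_max) && eligible[i]) {
            printf("promote %s\n", sorted[i]);
            promoted++;
        } else {
            printf("keep %s unpromoted\n", sorted[i]);
        }
    }
    return 0;
}

/*!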
* \internal * \brief Set roles for all instances of a promotable clone * - * \param[in] clone Promotable clone resource to update + * \param[in,out] rsc Promotable clone resource to update */ void pcmk__set_instance_roles(pe_resource_t *rsc) { int promoted = 0; GHashTableIter iter; pe_node_t *node = NULL; // Repurpose count to track the number of promoted instances allocated g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { node->count = 0; } // Set instances' promotion priorities and sort by highest priority first g_list_foreach(rsc->children, set_instance_priority, rsc); sort_promotable_instances(rsc); // Choose the first N eligible instances to be promoted g_list_foreach(rsc->children, set_instance_role, &promoted); pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d", rsc->id, promoted, pe__clone_promoted_max(rsc)); } /*! * * \internal * \brief Create actions for promotable clone instances * - * \param[in] clone Promotable clone to create actions for - * \param[out] any_promoting Will be set true if any instance is promoting - * \param[out] any_demoting Will be set true if any instance is demoting + * \param[in,out] clone Promotable clone to create actions for + * \param[out] any_promoting Will be set true if any instance is promoting + * \param[out] any_demoting Will be set true if any instance is demoting */ static void create_promotable_instance_actions(pe_resource_t *clone, bool *any_promoting, bool *any_demoting) { for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; instance->cmds->create_actions(instance); check_for_role_change(instance, any_demoting, any_promoting); } } /*! * \internal * \brief Reset each promotable instance's resource priority * * Reset the priority of each instance of a promotable clone to the clone's * priority (after promotion actions are scheduled, when instance priorities * were repurposed as promotion scores). * - * \param[in] clone Promotable clone to reset + * \param[in,out] clone Promotable clone to reset */ static void reset_instance_priorities(pe_resource_t *clone) { for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; instance->priority = clone->priority; } } /*! * \internal * \brief Create actions specific to promotable clones * - * \param[in] clone Promotable clone to create actions for + * \param[in,out] clone Promotable clone to create actions for */ void pcmk__create_promotable_actions(pe_resource_t *clone) { bool any_promoting = false; bool any_demoting = false; // Create actions for each clone instance individually create_promotable_instance_actions(clone, &any_promoting, &any_demoting); // Create pseudo-actions for clone as a whole pe__create_promotable_pseudo_ops(clone, any_promoting, any_demoting); // Undo our temporary repurposing of resource priority for instances reset_instance_priorities(clone); } /*! 
* \internal * \brief Create internal orderings for a promotable clone's instances * - * \param[in] clone Promotable clone instance to order + * \param[in,out] clone Promotable clone instance to order */ void pcmk__order_promotable_instances(pe_resource_t *clone) { pe_resource_t *previous = NULL; // Needed for ordered clones pcmk__promotable_restart_ordering(clone); for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; // Demote before promote pcmk__order_resource_actions(instance, RSC_DEMOTE, instance, RSC_PROMOTE, pe_order_optional); order_instance_promotion(clone, instance, previous); order_instance_demotion(clone, instance, previous); previous = instance; } } /*! * \internal * \brief Update dependent's allowed nodes for colocation with promotable * * \param[in,out] dependent Dependent resource to update * \param[in] primary_node Node where an instance of the primary will be * \param[in] colocation Colocation constraint to apply */ static void update_dependent_allowed_nodes(pe_resource_t *dependent, const pe_node_t *primary_node, const pcmk__colocation_t *colocation) { GHashTableIter iter; pe_node_t *node = NULL; const char *primary_value = NULL; const char *attr = NULL; if (colocation->score >= INFINITY) { return; // Colocation is mandatory, so allowed node scores don't matter } // Get value of primary's colocation node attribute attr = colocation->node_attribute; if (attr == NULL) { attr = CRM_ATTR_UNAME; } primary_value = pe_node_attribute_raw(primary_node, attr); pe_rsc_trace(colocation->primary, "Applying %s (%s with %s on %s by %s @%d) to %s", colocation->id, colocation->dependent->id, colocation->primary->id, pe__node_name(primary_node), attr, colocation->score, dependent->id); g_hash_table_iter_init(&iter, dependent->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { const char *dependent_value = pe_node_attribute_raw(node, attr); if (pcmk__str_eq(primary_value, dependent_value, pcmk__str_casei)) { pe_rsc_trace(colocation->primary, "%s: %d + %d", pe__node_name(node), node->weight, colocation->score); node->weight = pcmk__add_scores(node->weight, colocation->score); } } } /*! * \brief Update dependent for a colocation with a promotable clone * * \param[in] primary Primary resource in the colocation * \param[in,out] dependent Dependent resource in the colocation * \param[in] colocation Colocation constraint to apply */ void pcmk__update_dependent_with_promotable(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation) { GList *affected_nodes = NULL; /* Build a list of all nodes where an instance of the primary will be, and * (for optional colocations) update the dependent's allowed node scores for * each one. */ for (GList *iter = primary->children; iter != NULL; iter = iter->next) { pe_resource_t *instance = (pe_resource_t *) iter->data; pe_node_t *node = instance->fns->location(instance, NULL, FALSE); if (node == NULL) { continue; } if (instance->fns->state(instance, FALSE) == colocation->primary_role) { update_dependent_allowed_nodes(dependent, node, colocation); affected_nodes = g_list_prepend(affected_nodes, node); } } /* For mandatory colocations, add the primary's node weight to the * dependent's node weight for each affected node, and ban the dependent * from all other nodes. * * However, skip this for promoted-with-promoted colocations, otherwise * inactive dependent instances can't start (in the unpromoted role). 
*/ if ((colocation->score >= INFINITY) && ((colocation->dependent_role != RSC_ROLE_PROMOTED) || (colocation->primary_role != RSC_ROLE_PROMOTED))) { pe_rsc_trace(colocation->primary, "Applying %s (mandatory %s with %s) to %s", colocation->id, colocation->dependent->id, colocation->primary->id, dependent->id); node_list_exclude(dependent->allowed_nodes, affected_nodes, TRUE); } g_list_free(affected_nodes); } /*! * \internal * \brief Update dependent priority for colocation with promotable * * \param[in] primary Primary resource in the colocation * \param[in,out] dependent Dependent resource in the colocation * \param[in] colocation Colocation constraint to apply */ void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary, pe_resource_t *dependent, const pcmk__colocation_t *colocation) { pe_resource_t *primary_instance = NULL; // Look for a primary instance where dependent will be primary_instance = find_compatible_child(dependent, primary, colocation->primary_role, FALSE); if (primary_instance != NULL) { // Add primary instance's priority to dependent's int new_priority = pcmk__add_scores(dependent->priority, colocation->score); pe_rsc_trace(colocation->primary, "Applying %s (%s with %s) to %s priority (%s + %s = %s)", colocation->id, colocation->dependent->id, colocation->primary->id, dependent->id, pcmk_readable_score(dependent->priority), pcmk_readable_score(colocation->score), pcmk_readable_score(new_priority)); dependent->priority = new_priority; } else if (colocation->score >= INFINITY) { // Mandatory colocation, but primary won't be here pe_rsc_trace(colocation->primary, "Applying %s (%s with %s) to %s: can't be promoted", colocation->id, colocation->dependent->id, colocation->primary->id, dependent->id); dependent->priority = -INFINITY; } } diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c index f783a5bf74..fdadedc24a 100644 --- a/lib/pacemaker/pcmk_sched_resource.c +++ b/lib/pacemaker/pcmk_sched_resource.c @@ -1,1088 +1,1088 @@ /* * Copyright 2014-2023 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/
#include <crm_internal.h>
#include <stdlib.h>
#include <string.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
// Resource allocation methods that vary by resource variant
static resource_alloc_functions_t allocation_methods[] = { { pcmk__primitive_assign, pcmk__primitive_create_actions, pcmk__probe_rsc_on_node, pcmk__primitive_internal_constraints, pcmk__primitive_apply_coloc_score, pcmk__colocated_resources, pcmk__apply_location, pcmk__primitive_action_flags, pcmk__update_ordered_actions, pcmk__output_resource_actions, pcmk__add_rsc_actions_to_graph, pcmk__primitive_add_graph_meta, pcmk__primitive_add_utilization, pcmk__primitive_shutdown_lock, }, { pcmk__group_assign, pcmk__group_create_actions, pcmk__probe_rsc_on_node, pcmk__group_internal_constraints, pcmk__group_apply_coloc_score, pcmk__group_colocated_resources, pcmk__group_apply_location, pcmk__group_action_flags, pcmk__group_update_ordered_actions, pcmk__output_resource_actions, pcmk__add_rsc_actions_to_graph, pcmk__noop_add_graph_meta, pcmk__group_add_utilization, pcmk__group_shutdown_lock, }, { pcmk__clone_allocate, clone_create_actions, clone_create_probe, clone_internal_constraints, pcmk__clone_apply_coloc_score, pcmk__colocated_resources, clone_rsc_location, clone_action_flags, pcmk__multi_update_actions, pcmk__output_resource_actions, clone_expand, clone_append_meta, pcmk__clone_add_utilization, pcmk__clone_shutdown_lock, }, { pcmk__bundle_allocate, pcmk__bundle_create_actions, pcmk__bundle_create_probe, pcmk__bundle_internal_constraints, pcmk__bundle_apply_coloc_score, pcmk__colocated_resources, pcmk__bundle_rsc_location, pcmk__bundle_action_flags, pcmk__multi_update_actions, pcmk__output_bundle_actions, pcmk__bundle_expand, pcmk__noop_add_graph_meta, pcmk__bundle_add_utilization, pcmk__bundle_shutdown_lock, } }; /*! * \internal * \brief Check whether a resource's agent standard, provider, or type changed * - * \param[in] rsc Resource to check - * \param[in] node Node needing unfencing/restart if agent changed - * \param[in] rsc_entry XML with previously known agent information - * \param[in] active_on_node Whether \p rsc is active on \p node + * \param[in,out] rsc Resource to check + * \param[in,out] node Node needing unfencing if agent changed + * \param[in] rsc_entry XML with previously known agent information + * \param[in] active_on_node Whether \p rsc is active on \p node * * \return true if agent for \p rsc changed, otherwise false */ bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_entry, bool active_on_node) { bool changed = false; const char *attr_list[] = { XML_ATTR_TYPE, XML_AGENT_ATTR_CLASS, XML_AGENT_ATTR_PROVIDER }; for (int i = 0; i < PCMK__NELEM(attr_list); i++) { const char *value = crm_element_value(rsc->xml, attr_list[i]); const char *old_value = crm_element_value(rsc_entry, attr_list[i]); if (!pcmk__str_eq(value, old_value, pcmk__str_none)) { changed = true; trigger_unfencing(rsc, node, "Device definition changed", NULL, rsc->cluster); if (active_on_node) { crm_notice("Forcing restart of %s on %s " "because %s changed from '%s' to '%s'", rsc->id, pe__node_name(node), attr_list[i], pcmk__s(old_value, ""), pcmk__s(value, "")); } } } if (changed && active_on_node) { // Make sure the resource is restarted custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE, rsc->cluster); pe__set_resource_flags(rsc, pe_rsc_start_pending); } return changed; }
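
/* The allocation_methods[] table above is classic C vtable dispatch: one
 * method table per resource variant, indexed by the variant enum, so callers
 * invoke rsc->cmds->method(...) without switch statements. A standalone
 * sketch of the pattern with toy types (not the real Pacemaker structs):
 */
#include <stdio.h>

enum toy_variant { toy_primitive = 0, toy_group, toy_variant_max };

typedef struct toy_rsc toy_rsc_t;

typedef struct {
    void (*create_actions)(toy_rsc_t *rsc);
} toy_methods_t;

struct toy_rsc {
    const char *id;
    enum toy_variant variant;
    const toy_methods_t *cmds;
};

static void
primitive_create_actions(toy_rsc_t *rsc)
{
    printf("primitive actions for %s\n", rsc->id);
}

static void
group_create_actions(toy_rsc_t *rsc)
{
    printf("group actions for %s\n", rsc->id);
}

// One entry per variant, in enum order, like allocation_methods[]
static const toy_methods_t toy_methods[toy_variant_max] = {
    [toy_primitive] = { primitive_create_actions },
    [toy_group]     = { group_create_actions },
};

int
main(void)
{
    toy_rsc_t rsc = { "dummy", toy_group, NULL };

    rsc.cmds = &toy_methods[rsc.variant]; // as the scheduler does per resource
    rsc.cmds->create_actions(&rsc);
    return 0;
}

/*!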
* \internal * \brief Add resource (and any matching children) to list if it matches ID * * \param[in] result List to add resource to * \param[in] rsc Resource to check * \param[in] id ID to match * * \return (Possibly new) head of list */ static GList * add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id) { if ((strcmp(rsc->id, id) == 0) || ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) { result = g_list_prepend(result, rsc); } for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; result = add_rsc_if_matching(result, child, id); } return result; } /*! * \internal * \brief Find all resources matching a given ID by either ID or clone name * * \param[in] id Resource ID to check * \param[in] data_set Cluster working set * * \return List of all resources that match \p id * \note The caller is responsible for freeing the return value with * g_list_free(). */ GList * -pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set) +pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set) { GList *result = NULL; CRM_CHECK((id != NULL) && (data_set != NULL), return NULL); for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { result = add_rsc_if_matching(result, (pe_resource_t *) iter->data, id); } return result; } /*! * \internal * \brief Set the variant-appropriate allocation methods for a resource * - * \param[in] rsc Resource to set allocation methods for - * \param[in] ignored Only here so function can be used with g_list_foreach() + * \param[in,out] rsc Resource to set allocation methods for + * \param[in] ignored Here so function can be used with g_list_foreach() */ static void set_allocation_methods_for_rsc(pe_resource_t *rsc, void *ignored) { rsc->cmds = &allocation_methods[rsc->variant]; g_list_foreach(rsc->children, (GFunc) set_allocation_methods_for_rsc, NULL); } /*! 
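 */

/* Sketch of the recursive matching above on a toy tree: an entry matches by
 * its ID or by an optional alias (standing in for clone_name), and matching
 * descends into children. Prepending keeps each hit O(1); as with
 * pcmk__rscs_matching_id(), the caller frees only the list, not the entries.
 * Toy types, illustrative only:
 */
#include <glib.h>
#include <stdio.h>
#include <string.h>

typedef struct toy_rsc {
    const char *id;
    const char *alias;  // may be NULL, like rsc->clone_name
    GList *children;    // list of struct toy_rsc *
} toy_rsc_t;

static GList *
collect_matches(GList *result, toy_rsc_t *rsc, const char *id)
{
    if ((strcmp(rsc->id, id) == 0)
        || ((rsc->alias != NULL) && (strcmp(rsc->alias, id) == 0))) {
        result = g_list_prepend(result, rsc);
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        result = collect_matches(result, iter->data, id);
    }
    return result;
}

int
main(void)
{
    toy_rsc_t child = { "rsc:0", "rsc", NULL };
    toy_rsc_t clone = { "rsc-clone", NULL, NULL };
    GList *matches = NULL;

    clone.children = g_list_prepend(clone.children, &child);

    // "rsc" matches the instance via its alias, as clone names do
    matches = collect_matches(NULL, &clone, "rsc");
    for (GList *iter = matches; iter != NULL; iter = iter->next) {
        printf("matched %s\n", ((toy_rsc_t *) iter->data)->id);
    }
    g_list_free(matches);
    g_list_free(clone.children);
    return 0;
}

/*!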
* \internal * \brief Set the variant-appropriate allocation methods for all resources * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ void pcmk__set_allocation_methods(pe_working_set_t *data_set) { g_list_foreach(data_set->resources, (GFunc) set_allocation_methods_for_rsc, NULL); } // Shared implementation of resource_alloc_functions_t:colocated_resources() GList * pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc, GList *colocated_rscs) { const GList *iter = NULL; if (orig_rsc == NULL) { orig_rsc = rsc; } if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) { return colocated_rscs; } pe_rsc_trace(orig_rsc, "%s is in colocation chain with %s", rsc->id, orig_rsc->id); colocated_rscs = g_list_append(colocated_rscs, (gpointer) rsc); // Follow colocations where this resource is the dependent resource for (iter = rsc->rsc_cons; iter != NULL; iter = iter->next) { const pcmk__colocation_t *constraint = iter->data; const pe_resource_t *primary = constraint->primary; if (primary == orig_rsc) { continue; // Break colocation loop } if ((constraint->score == INFINITY) && (pcmk__colocation_affects(rsc, primary, constraint, true) == pcmk__coloc_affects_location)) { colocated_rscs = primary->cmds->colocated_resources(primary, orig_rsc, colocated_rscs); } } // Follow colocations where this resource is the primary resource for (iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) { const pcmk__colocation_t *constraint = iter->data; const pe_resource_t *dependent = constraint->dependent; if (dependent == orig_rsc) { continue; // Break colocation loop } if (pe_rsc_is_clone(rsc) && !pe_rsc_is_clone(dependent)) { continue; // We can't be sure whether dependent will be colocated } if ((constraint->score == INFINITY) && (pcmk__colocation_affects(dependent, rsc, constraint, true) == pcmk__coloc_affects_location)) { colocated_rscs = dependent->cmds->colocated_resources(dependent, orig_rsc, colocated_rscs); } } return colocated_rscs; } // No-op function for variants that don't need to implement add_graph_meta() void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml) { } void pcmk__output_resource_actions(pe_resource_t *rsc) { pcmk__output_t *out = rsc->cluster->priv; pe_node_t *next = NULL; pe_node_t *current = NULL; if (rsc->children != NULL) { for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; child->cmds->output_actions(child); } return; } next = rsc->allocated_to; if (rsc->running_on) { current = pe__current_node(rsc); if (rsc->role == RSC_ROLE_STOPPED) { /* This can occur when resources are being recovered because * the current role can change in pcmk__primitive_create_actions() */ rsc->role = RSC_ROLE_STARTED; } } if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) { /* Don't log stopped orphans */ return; } out->message(out, "rsc-action", rsc, current, next); } /*! * \internal * \brief Assign a specified primitive resource to a node * * Assign a specified primitive resource to a specified node, if the node can * run the resource (or unconditionally, if \p force is true). Mark the resource * as no longer provisional. If the primitive can't be assigned (or \p chosen is * NULL), unassign any previous assignment for it, set its next role to stopped, * and update any existing actions scheduled for it. This is not done * recursively for children, so it should be called only for primitives. 
* - * \param[in] rsc Resource to assign - * \param[in] chosen Node to assign \p rsc to - * \param[in] force If true, assign to \p chosen even if unavailable + * \param[in,out] rsc Resource to assign + * \param[in,out] chosen Node to assign \p rsc to + * \param[in] force If true, assign to \p chosen even if unavailable * * \return true if \p rsc could be assigned, otherwise false * * \note Assigning a resource to the NULL node using this function is different * from calling pcmk__unassign_resource(), in that it will also update any * actions created for the resource. */ bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, bool force) { pcmk__output_t *out = rsc->cluster->priv; CRM_ASSERT(rsc->variant == pe_native); if (!force && (chosen != NULL)) { if ((chosen->weight < 0) // Allow the graph to assume that guest node connections will come up || (!pcmk__node_available(chosen, true, false) && !pe__is_guest_node(chosen))) { crm_debug("All nodes for resource %s are unavailable, unclean or " "shutting down (%s can%s run resources, with weight %d)", rsc->id, pe__node_name(chosen), (pcmk__node_available(chosen, true, false)? "" : "not"), chosen->weight); pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability"); chosen = NULL; } } pcmk__unassign_resource(rsc); pe__clear_resource_flags(rsc, pe_rsc_provisional); if (chosen == NULL) { crm_debug("Could not allocate a node for %s", rsc->id); pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate"); for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) { pe_action_t *op = (pe_action_t *) iter->data; crm_debug("Updating %s for allocation failure", op->uuid); if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) { pe__clear_action_flags(op, pe_action_optional); } else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) { pe__clear_action_flags(op, pe_action_runnable); //pe__set_resource_flags(rsc, pe_rsc_block); } else { // Cancel recurring actions, unless for stopped state const char *interval_ms_s = NULL; const char *target_rc_s = NULL; char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING); interval_ms_s = g_hash_table_lookup(op->meta, XML_LRM_ATTR_INTERVAL_MS); target_rc_s = g_hash_table_lookup(op->meta, XML_ATTR_TE_TARGET_RC); if ((interval_ms_s != NULL) && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none) && !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) { pe__clear_action_flags(op, pe_action_runnable); } free(rc_stopped); } } return false; } crm_debug("Assigning %s to %s", rsc->id, pe__node_name(chosen)); rsc->allocated_to = pe__copy_node(chosen); chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc, rsc); chosen->details->num_resources++; chosen->count++; pcmk__consume_node_capacity(chosen->details->utilization, rsc); if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) { out->message(out, "resource-util", rsc, chosen, __func__); } return true; } /*! * \internal * \brief Assign a specified resource (of any variant) to a node * * Assign a specified resource and its children (if any) to a specified node, if * the node can run the resource (or unconditionally, if \p force is true). Mark * the resources as no longer provisional. If the resources can't be assigned * (or \p node is NULL), unassign any previous assignments, set next role to * stopped, and update any existing actions scheduled for them.
* - * \param[in] rsc Resource to assign - * \param[in] chosen Node to assign \p rsc to - * \param[in] force If true, assign to \p chosen even if unavailable + * \param[in,out] rsc Resource to assign + * \param[in,out] node Node to assign \p rsc to + * \param[in] force If true, assign to \p node even if unavailable * * \return true if \p rsc could be assigned, otherwise false * * \note Assigning a resource to the NULL node using this function is different * from calling pcmk__unassign_resource(), in that it will also update any * actions created for the resource. */ bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force) { bool changed = false; if (rsc->children == NULL) { if (rsc->allocated_to != NULL) { changed = true; } pcmk__finalize_assignment(rsc, node, force); } else { for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child_rsc = (pe_resource_t *) iter->data; changed |= pcmk__assign_resource(child_rsc, node, force); } } return changed; } /*! * \internal * \brief Remove any assignment of a specified resource to a node * * If a specified resource has been assigned to a node, remove that assignment * and mark the resource as provisional again. This is not done recursively for * children, so it should be called only for primitives. * - * \param[in] rsc Resource to unassign + * \param[in,out] rsc Resource to unassign */ void pcmk__unassign_resource(pe_resource_t *rsc) { pe_node_t *old = rsc->allocated_to; if (old == NULL) { return; } crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old)); pe__set_resource_flags(rsc, pe_rsc_provisional); rsc->allocated_to = NULL; /* We're going to free the pe_node_t, but its details member is shared and * will remain, so update that appropriately first. */ old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc, rsc); old->details->num_resources--; pcmk__release_node_capacity(old->details->utilization, rsc); free(old); }
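
/* pcmk__unassign_resource() relies on pe_node_t being a thin per-use wrapper
 * (weight, count) around a shared details struct: freeing a wrapper must not
 * free the details, and the shared bookkeeping must be updated first. A
 * standalone sketch of that pattern, with toy types in place of pe_node_t:
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    const char *uname;
    int num_resources;      // shared state, survives any single wrapper
} toy_details_t;

typedef struct {
    int weight;             // per-copy state
    toy_details_t *details; // shared state
} toy_node_t;

static toy_node_t *
copy_node(const toy_node_t *node)
{
    toy_node_t *copy = malloc(sizeof(toy_node_t));

    *copy = *node; // shallow copy: details stays shared, like pe__copy_node()
    return copy;
}

int
main(void)
{
    toy_details_t details = { "node1", 0 };
    toy_node_t original = { 100, &details };
    toy_node_t *assigned = copy_node(&original);

    // "Assign": bump the shared count through this copy
    assigned->details->num_resources++;

    // "Unassign": update shared details first, then free only the wrapper
    assigned->details->num_resources--;
    free(assigned);

    printf("%s now has %d resources\n", details.uname, details.num_resources);
    return 0;
}

/*!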
* \internal * \brief Check whether a resource has reached its migration threshold on a node * - * \param[in] rsc Resource to check - * \param[in] node Node to check - * \param[out] failed If the threshold has been reached, this will be set to - * the resource that failed (possibly a parent of \p rsc) + * \param[in,out] rsc Resource to check + * \param[in] node Node to check + * \param[out] failed If threshold has been reached, this will be set to + * resource that failed (possibly a parent of \p rsc) * * \return true if the migration threshold has been reached, false otherwise */ bool -pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node, +pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node, pe_resource_t **failed) { int fail_count, remaining_tries; pe_resource_t *rsc_to_ban = rsc; // Migration threshold of 0 means never force away if (rsc->migration_threshold == 0) { return false; } // If we're ignoring failures, also ignore the migration threshold if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) { return false; } // If there are no failures, there's no need to force away fail_count = pe_get_failcount(node, rsc, NULL, pe_fc_effective|pe_fc_fillers, NULL); if (fail_count <= 0) { return false; } // If failed resource is anonymous clone instance, we'll force clone away if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { rsc_to_ban = uber_parent(rsc); } // How many more times recovery will be tried on this node remaining_tries = rsc->migration_threshold - fail_count; if (remaining_tries <= 0) { crm_warn("%s cannot run on %s due to reaching migration threshold " "(clean up resource to allow again)" CRM_XS " failures=%d migration-threshold=%d", rsc_to_ban->id, pe__node_name(node), fail_count, rsc->migration_threshold); if (failed != NULL) { *failed = rsc_to_ban; } return true; } crm_info("%s can fail %d more time%s on " "%s before reaching migration threshold (%d)", rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries), pe__node_name(node), rsc->migration_threshold); return false; } static void * convert_const_pointer(const void *ptr) { /* Worst function ever */ return (void *)ptr; } /*! * \internal * \brief Get a node's weight * * \param[in] node Unweighted node to check (for node ID) * \param[in] nodes List of weighted nodes to look for \p node in * * \return Node's weight, or -INFINITY if not found */ static int -get_node_weight(pe_node_t *node, GHashTable *nodes) +get_node_weight(const pe_node_t *node, GHashTable *nodes) { pe_node_t *weighted_node = NULL; if ((node != NULL) && (nodes != NULL)) { weighted_node = g_hash_table_lookup(nodes, node->details->id); } return (weighted_node == NULL)? -INFINITY : weighted_node->weight; } /*! 
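 */

/* Worked example of the threshold arithmetic above: remaining tries are
 * migration-threshold minus the fail count, and the resource is forced away
 * once that difference reaches zero. Standalone sketch, not the real API:
 */
#include <stdbool.h>
#include <stdio.h>

static bool
threshold_reached(int migration_threshold, int fail_count, int *remaining)
{
    if ((migration_threshold == 0) || (fail_count <= 0)) {
        return false; // never force away, or nothing has failed yet
    }
    *remaining = migration_threshold - fail_count;
    return (*remaining <= 0);
}

int
main(void)
{
    int remaining = 0;

    // 2 failures with migration-threshold=3: one more recovery attempt left
    printf("reached=%d remaining=%d\n",
           threshold_reached(3, 2, &remaining), remaining);

    // 3 failures with migration-threshold=3: forced away from the node
    printf("reached=%d remaining=%d\n",
           threshold_reached(3, 3, &remaining), remaining);
    return 0;
}

/*!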
* \internal * \brief Compare two resources according to which should be allocated first * * \param[in] a First resource to compare * \param[in] b Second resource to compare * \param[in] data Sorted list of all nodes in cluster * * \return -1 if \p a should be allocated before \p b, 0 if they are equal, * or +1 if \p a should be allocated after \p b */ static gint cmp_resources(gconstpointer a, gconstpointer b, gpointer data) { const pe_resource_t *resource1 = a; const pe_resource_t *resource2 = b; - GList *nodes = (GList *) data; + const GList *nodes = (const GList *) data; int rc = 0; int r1_weight = -INFINITY; int r2_weight = -INFINITY; pe_node_t *r1_node = NULL; pe_node_t *r2_node = NULL; GHashTable *r1_nodes = NULL; GHashTable *r2_nodes = NULL; const char *reason = NULL; // Resources with highest priority should be allocated first reason = "priority"; r1_weight = resource1->priority; r2_weight = resource2->priority; if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } // We need nodes to make any other useful comparisons reason = "no node list"; if (nodes == NULL) { goto done; } // Calculate and log node weights pcmk__add_colocated_node_scores(convert_const_pointer(resource1), resource1->id, &r1_nodes, NULL, 1, pcmk__coloc_select_this_with); pcmk__add_colocated_node_scores(convert_const_pointer(resource2), resource2->id, &r2_nodes, NULL, 1, pcmk__coloc_select_this_with); pe__show_node_weights(true, NULL, resource1->id, r1_nodes, resource1->cluster); pe__show_node_weights(true, NULL, resource2->id, r2_nodes, resource2->cluster); // The resource with highest score on its current node goes first reason = "current location"; if (resource1->running_on != NULL) { r1_node = pe__current_node(resource1); } if (resource2->running_on != NULL) { r2_node = pe__current_node(resource2); } r1_weight = get_node_weight(r1_node, r1_nodes); r2_weight = get_node_weight(r2_node, r2_nodes); if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } // Otherwise a higher weight on any node will do reason = "score"; - for (GList *iter = nodes; iter != NULL; iter = iter->next) { - pe_node_t *node = (pe_node_t *) iter->data; + for (const GList *iter = nodes; iter != NULL; iter = iter->next) { + const pe_node_t *node = (const pe_node_t *) iter->data; r1_weight = get_node_weight(node, r1_nodes); r2_weight = get_node_weight(node, r2_nodes); if (r1_weight > r2_weight) { rc = -1; goto done; } if (r1_weight < r2_weight) { rc = 1; goto done; } } done: crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s", resource1->id, r1_weight, ((r1_node == NULL)? "" : " on "), ((r1_node == NULL)? "" : r1_node->details->id), ((rc < 0)? '>' : ((rc > 0)? '<' : '=')), resource2->id, r2_weight, ((r2_node == NULL)? "" : " on "), ((r2_node == NULL)? "" : r2_node->details->id), reason); if (r1_nodes != NULL) { g_hash_table_destroy(r1_nodes); } if (r2_nodes != NULL) { g_hash_table_destroy(r2_nodes); } return rc; } /*! * \internal * \brief Sort resources in the order they should be allocated to nodes * - * \param[in] data_set Cluster working set + * \param[in,out] data_set Cluster working set */ void pcmk__sort_resources(pe_working_set_t *data_set) { GList *nodes = g_list_copy(data_set->nodes); nodes = pcmk__sort_nodes(nodes, NULL); data_set->resources = g_list_sort_with_data(data_set->resources, cmp_resources, nodes); g_list_free(nodes); }
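
/* pcmk__sort_resources() uses g_list_sort_with_data() so the comparator can
 * consult context (the sorted node list) beyond the two elements themselves.
 * A minimal standalone sketch of that GCompareDataFunc pattern, with toy
 * types; higher priority sorts first, and ties fall back to ID order:
 */
#include <glib.h>
#include <stdio.h>

typedef struct {
    const char *id;
    int priority;
} toy_rsc_t;

static gint
cmp_rscs(gconstpointer a, gconstpointer b, gpointer data)
{
    const toy_rsc_t *r1 = a;
    const toy_rsc_t *r2 = b;
    const char *reason = data; // context passed through, like the node list

    if (r1->priority != r2->priority) {
        return (r1->priority > r2->priority)? -1 : 1; // higher first
    }
    g_message("tiebreak %s vs. %s: %s", r1->id, r2->id, reason);
    return g_strcmp0(r1->id, r2->id);
}

int
main(void)
{
    toy_rsc_t rscs[] = { { "b", 1 }, { "a", 1 }, { "c", 2 } };
    GList *list = NULL;

    for (int i = 0; i < 3; i++) {
        list = g_list_prepend(list, &rscs[i]);
    }
    list = g_list_sort_with_data(list, cmp_rscs, (gpointer) "id order");

    for (GList *iter = list; iter != NULL; iter = iter->next) {
        printf("%s\n", ((toy_rsc_t *) iter->data)->id); // c, a, b
    }
    g_list_free(list);
    return 0;
}

/*!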
* \internal * \brief Create a hash table with a single node in it * * \param[in] node Node to copy into new table * * \return Newly created hash table containing a copy of \p node * \note The caller is responsible for freeing the result with * g_hash_table_destroy(). */ static GHashTable * new_node_table(pe_node_t *node) { GHashTable *table = pcmk__strkey_table(NULL, free); node = pe__copy_node(node); g_hash_table_insert(table, (gpointer) node->details->id, node); return table; } /*! * \internal * \brief Apply a resource's parent's colocation scores to a node table * * \param[in] rsc Resource whose colocations should be applied * \param[in,out] nodes Node table to apply colocations to */ static void apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes) { GList *iter = NULL; pcmk__colocation_t *colocation = NULL; for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) { colocation = (pcmk__colocation_t *) iter->data; pcmk__add_colocated_node_scores(colocation->primary, rsc->id, nodes, colocation->node_attribute, colocation->score / (float) INFINITY, pcmk__coloc_select_default); } for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) { colocation = (pcmk__colocation_t *) iter->data; if (!pcmk__colocation_has_influence(colocation, rsc)) { continue; } pcmk__add_colocated_node_scores(colocation->dependent, rsc->id, nodes, colocation->node_attribute, colocation->score / (float) INFINITY, pcmk__coloc_select_nonnegative); } } /*! * \internal * \brief Compare clone or bundle instances based on colocation scores * * Determine the relative order in which two clone or bundle instances should be * assigned to nodes, considering the scores of colocation constraints directly * or indirectly involving them. * * \param[in] instance1 First instance to compare * \param[in] instance2 Second instance to compare * * \return A negative number if \p instance1 should be assigned first, * a positive number if \p instance2 should be assigned first, * or 0 if assignment order doesn't matter */ static int cmp_instance_by_colocation(const pe_resource_t *instance1, const pe_resource_t *instance2) { int rc = 0; pe_node_t *node1 = NULL; pe_node_t *node2 = NULL; pe_node_t *current_node1 = pe__current_node(instance1); pe_node_t *current_node2 = pe__current_node(instance2); GHashTable *colocated_scores1 = NULL; GHashTable *colocated_scores2 = NULL; CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL) && (instance2 != NULL) && (instance2->parent != NULL) && (current_node1 != NULL) && (current_node2 != NULL)); // Create node tables initialized with each node colocated_scores1 = new_node_table(current_node1); colocated_scores2 = new_node_table(current_node2); // Apply parental colocations apply_parent_colocations(instance1, &colocated_scores1); apply_parent_colocations(instance2, &colocated_scores2); // Find original nodes again, with scores updated for colocations node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id); node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id); // Compare nodes by updated scores if (node1->weight < node2->weight) { crm_trace("Assign %s (%d on %s) after %s (%d on %s)", instance1->id, node1->weight, pe__node_name(node1), instance2->id, node2->weight, pe__node_name(node2)); rc = 1; } else if (node1->weight > node2->weight) { crm_trace("Assign %s (%d on %s) before %s (%d on %s)", instance1->id, node1->weight, pe__node_name(node1), instance2->id, node2->weight, pe__node_name(node2)); rc = -1; } 
g_hash_table_destroy(colocated_scores1); g_hash_table_destroy(colocated_scores2); return rc; } /*! * \internal * \brief Check whether a resource or any of its children are failed * * \param[in] rsc Resource to check * * \return true if \p rsc or any of its children are failed, otherwise false */ static bool -did_fail(const pe_resource_t * rsc) +did_fail(const pe_resource_t *rsc) { if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { return true; } for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { - if (did_fail((pe_resource_t *) iter->data)) { + if (did_fail((const pe_resource_t *) iter->data)) { return true; } } return false; } /*! * \internal * \brief Check whether a node is allowed to run a resource * * \param[in] rsc Resource to check * \param[in,out] node Node to check (will be set NULL if not allowed) * * \return true if *node is either NULL or allowed for \p rsc, otherwise false */ static bool node_is_allowed(const pe_resource_t *rsc, pe_node_t **node) { if (*node != NULL) { pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes, (*node)->details->id); if ((allowed == NULL) || (allowed->weight < 0)) { pe_rsc_trace(rsc, "%s: current location (%s) is unavailable", rsc->id, pe__node_name(*node)); *node = NULL; return false; } } return true; } /*! * \internal * \brief Compare two clone or bundle instances' instance numbers * * \param[in] a First instance to compare * \param[in] b Second instance to compare * * \return A negative number if \p a's instance number is lower, * a positive number if \p b's instance number is lower, * or 0 if their instance numbers are the same */ gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b) { const pe_resource_t *instance1 = (const pe_resource_t *) a; const pe_resource_t *instance2 = (const pe_resource_t *) b; char *div1 = NULL; char *div2 = NULL; CRM_ASSERT((instance1 != NULL) && (instance2 != NULL)); // Clone numbers are after a colon, bundle numbers after a dash div1 = strrchr(instance1->id, ':'); if (div1 == NULL) { div1 = strrchr(instance1->id, '-'); } div2 = strrchr(instance2->id, ':'); if (div2 == NULL) { div2 = strrchr(instance2->id, '-'); } CRM_ASSERT((div1 != NULL) && (div2 != NULL)); return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10)); } /*! 
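 */

/* Why pcmk__cmp_instance_number() parses the trailing number rather than
 * comparing strings: lexicographically, "rsc:10" sorts before "rsc:9", which
 * would misorder clone instances. A standalone illustration:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long
instance_number(const char *id)
{
    const char *div = strrchr(id, ':'); // clone numbers follow a colon

    if (div == NULL) {
        div = strrchr(id, '-');         // bundle numbers follow a dash
    }
    return (div == NULL)? -1 : strtol(div + 1, NULL, 10);
}

int
main(void)
{
    const char *a = "rsc:9";
    const char *b = "rsc:10";

    // String comparison puts "rsc:9" after "rsc:10" ('9' > '1')
    printf("strcmp puts 9 after 10: %d\n", strcmp(a, b) > 0);

    // Numeric comparison restores the intended order: 9 before 10
    printf("numeric difference: %ld\n", instance_number(a) - instance_number(b));
    return 0;
}

/*!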
* \internal * \brief Compare clone or bundle instances according to assignment order * * Compare two clone or bundle instances according to the order they should be * assigned to nodes, preferring (in order): * * - Active instance that is less multiply active * - Instance that is not active on a disallowed node * - Instance with higher configured priority * - Active instance * - Active instance whose current node can run resources * - Active instance whose parent is allowed on current node * - Active instance whose current node has fewer other instances * - Instance that is not failed * - Instance whose colocations result in higher score on current node * - Instance with lower instance number * * \param[in] a First instance to compare * \param[in] b Second instance to compare * * \return A negative number if \p a should be assigned first, * a positive number if \p b should be assigned first, * or 0 if assignment order doesn't matter */ gint pcmk__cmp_instance(gconstpointer a, gconstpointer b) { int rc = 0; pe_node_t *node1 = NULL; pe_node_t *node2 = NULL; unsigned int nnodes1 = 0; unsigned int nnodes2 = 0; bool can1 = true; bool can2 = true; const pe_resource_t *instance1 = (const pe_resource_t *) a; const pe_resource_t *instance2 = (const pe_resource_t *) b; CRM_ASSERT((instance1 != NULL) && (instance2 != NULL)); node1 = pe__find_active_on(instance1, &nnodes1, NULL); node2 = pe__find_active_on(instance2, &nnodes2, NULL); /* If both instances are running and at least one is multiply * active, prefer instance that's running on fewer nodes. */ if ((nnodes1 > 0) && (nnodes2 > 0)) { if (nnodes1 < nnodes2) { crm_trace("Assign %s (active on %d) before %s (active on %d): " "less multiply active", instance1->id, nnodes1, instance2->id, nnodes2); return -1; } else if (nnodes1 > nnodes2) { crm_trace("Assign %s (active on %d) after %s (active on %d): " "more multiply active", instance1->id, nnodes1, instance2->id, nnodes2); return 1; } } /* An instance that is either inactive or active on an allowed node is * preferred over an instance that is active on a no-longer-allowed node. */ can1 = node_is_allowed(instance1, &node1); can2 = node_is_allowed(instance2, &node2); if (can1 && !can2) { crm_trace("Assign %s before %s: not active on a disallowed node", instance1->id, instance2->id); return -1; } else if (!can1 && can2) { crm_trace("Assign %s after %s: active on a disallowed node", instance1->id, instance2->id); return 1; } // Prefer instance with higher configured priority if (instance1->priority > instance2->priority) { crm_trace("Assign %s before %s: priority (%d > %d)", instance1->id, instance2->id, instance1->priority, instance2->priority); return -1; } else if (instance1->priority < instance2->priority) { crm_trace("Assign %s after %s: priority (%d < %d)", instance1->id, instance2->id, instance1->priority, instance2->priority); return 1; } // Prefer active instance if ((node1 == NULL) && (node2 == NULL)) { crm_trace("No assignment preference for %s vs. %s: inactive", instance1->id, instance2->id); return 0; } else if (node1 == NULL) { crm_trace("Assign %s after %s: active", instance1->id, instance2->id); return 1; } else if (node2 == NULL) { crm_trace("Assign %s before %s: active", instance1->id, instance2->id); return -1; } // Prefer instance whose current node can run resources can1 = pcmk__node_available(node1, false, false); can2 = pcmk__node_available(node2, false, false); if (can1 && !can2) { crm_trace("Assign %s before %s: current node can run resources", instance1->id, instance2->id); return -1; } else if (!can1 && can2) { crm_trace("Assign %s after %s: current node can't run resources", instance1->id, instance2->id); return 1; } // Prefer instance whose parent is allowed to run on instance's current node node1 = pcmk__top_allowed_node(instance1, node1); node2 = pcmk__top_allowed_node(instance2, node2); if ((node1 == NULL) && (node2 == NULL)) { crm_trace("No assignment preference for %s vs. %s: " "parent not allowed on either instance's current node", instance1->id, instance2->id); return 0; } else if (node1 == NULL) { crm_trace("Assign %s after %s: parent not allowed on current node", instance1->id, instance2->id); return 1; } else if (node2 == NULL) { crm_trace("Assign %s before %s: parent allowed on current node", instance1->id, instance2->id); return -1; } // Prefer instance whose current node is running fewer other instances if (node1->count < node2->count) { crm_trace("Assign %s before %s: fewer active instances on current node", instance1->id, instance2->id); return -1; } else if (node1->count > node2->count) { crm_trace("Assign %s after %s: more active instances on current node", instance1->id, instance2->id); return 1; } // Prefer instance that has not failed can1 = did_fail(instance1); can2 = did_fail(instance2); if (!can1 && can2) { crm_trace("Assign %s before %s: not failed", instance1->id, instance2->id); return -1; } else if (can1 && !can2) { crm_trace("Assign %s after %s: failed", instance1->id, instance2->id); return 1; } // Prefer instance with higher cumulative colocation score on current node rc = cmp_instance_by_colocation(instance1, instance2); if (rc != 0) { return rc; } // Prefer instance with lower instance number rc = pcmk__cmp_instance_number(instance1, instance2); if (rc < 0) { crm_trace("Assign %s before %s: instance number", instance1->id, instance2->id); } else if (rc > 0) { crm_trace("Assign %s after %s: instance number", instance1->id, instance2->id); } else { crm_trace("No assignment preference for %s vs. %s", instance1->id, instance2->id); } return rc; }