diff --git a/include/crm/common/actions_internal.h b/include/crm/common/actions_internal.h index 844f44d90a..707221507b 100644 --- a/include/crm/common/actions_internal.h +++ b/include/crm/common/actions_internal.h @@ -1,276 +1,282 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_ACTIONS_INTERNAL__H #define PCMK__CRM_COMMON_ACTIONS_INTERNAL__H #include <stdbool.h> // bool #include <stdint.h> // uint32_t, UINT32_C() #include <glib.h> // guint, GList, GHashTable #include <libxml/tree.h> // xmlNode -#include <crm/common/actions.h> // PCMK_ACTION_MONITOR, etc. -#include <crm/common/resources.h> // enum rsc_start_requirement +#include <crm/common/actions.h> // PCMK_ACTION_MONITOR #include <crm/common/roles.h> // enum rsc_role_e #include <crm/common/scheduler_types.h> // pcmk_resource_t, pcmk_node_t #include <crm/common/strings_internal.h> // pcmk__str_eq() #ifdef __cplusplus extern "C" { #endif // Action names as strings // @COMPAT Deprecated since 2.0.0 #define PCMK__ACTION_POWEROFF "poweroff" //! printf-style format to create operation key from resource, action, interval #define PCMK__OP_FMT "%s_%s_%u" /*! * \internal * \brief Set action flags for an action * * \param[in,out] action Action to set flags for * \param[in] flags_to_set Group of enum pcmk__action_flags to set */ #define pcmk__set_action_flags(action, flags_to_set) do { \ (action)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Action", (action)->uuid, \ (action)->flags, \ (flags_to_set), \ #flags_to_set); \ } while (0) /*! * \internal * \brief Clear action flags for an action * * \param[in,out] action Action to clear flags for * \param[in] flags_to_clear Group of enum pcmk__action_flags to clear */ #define pcmk__clear_action_flags(action, flags_to_clear) do { \ (action)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Action", (action)->uuid, \ (action)->flags, \ (flags_to_clear), \ #flags_to_clear); \ } while (0) /*! * \internal * \brief Set action flags for a flag group * * \param[in,out] action_flags Flag group to set flags for * \param[in] action_name Name of action being modified (for logging) * \param[in] to_set Group of enum pcmk__action_flags to set */ #define pcmk__set_raw_action_flags(action_flags, action_name, to_set) do { \ action_flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Action", action_name, \ (action_flags), \ (to_set), #to_set); \ } while (0) /*!
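As context for PCMK__OP_FMT above: an operation key is the resource ID, action name, and interval in milliseconds joined by underscores. A minimal standalone sketch of the format (the resource name "vip" and buffer size are illustrative; pcmk__op_key(), declared later in this header, is the real constructor):

    #include <stdio.h>

    int main(void)
    {
        char key[64];

        // Same format string as PCMK__OP_FMT ("%s_%s_%u")
        snprintf(key, sizeof(key), "%s_%s_%u", "vip", "monitor", 10000U);
        printf("%s\n", key);    // prints "vip_monitor_10000"
        return 0;
    }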
* \internal * \brief Clear action flags for a flag group * * \param[in,out] action_flags Flag group to clear flags for * \param[in] action_name Name of action being modified (for logging) * \param[in] to_clear Group of enum pcmk__action_flags to clear */ #define pcmk__clear_raw_action_flags(action_flags, action_name, to_clear) \ do { \ action_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "Action", action_name, \ (action_flags), \ (to_clear), #to_clear); \ } while (0) // Possible actions (including some pseudo-actions) enum pcmk__action_type { pcmk__action_unspecified = 0, // Unspecified or unknown action pcmk__action_monitor, // Monitor // Each "completed" action must be the regular action plus 1 pcmk__action_stop, // Stop pcmk__action_stopped, // Stop completed pcmk__action_start, // Start pcmk__action_started, // Start completed pcmk__action_notify, // Notify pcmk__action_notified, // Notify completed pcmk__action_promote, // Promote pcmk__action_promoted, // Promoted pcmk__action_demote, // Demote pcmk__action_demoted, // Demoted pcmk__action_shutdown, // Shut down node pcmk__action_fence, // Fence node }; // Action scheduling flags enum pcmk__action_flags { // No action flags set (compare with equality rather than bit set) pcmk__no_action_flags = 0, // Whether action does not require invoking an agent pcmk__action_pseudo = (UINT32_C(1) << 0), // Whether action is runnable pcmk__action_runnable = (UINT32_C(1) << 1), // Whether action should not be executed pcmk__action_optional = (UINT32_C(1) << 2), // Whether action should be added to transition graph even if optional pcmk__action_always_in_graph = (UINT32_C(1) << 3), // Whether operation-specific instance attributes have been unpacked yet pcmk__action_attrs_evaluated = (UINT32_C(1) << 4), // Whether action is allowed to be part of a live migration pcmk__action_migratable = (UINT32_C(1) << 7), // Whether action has been added to transition graph pcmk__action_added_to_graph = (UINT32_C(1) << 8), // Whether action is a stop to abort a dangling migration pcmk__action_migration_abort = (UINT32_C(1) << 11), /* * Whether action is an ordering point for minimum required instances * (used to implement ordering after clones with \c PCMK_META_CLONE_MIN * configured, and ordered sets with \c PCMK_XA_REQUIRE_ALL set to * \c PCMK_VALUE_FALSE). */ pcmk__action_min_runnable = (UINT32_C(1) << 12), // Whether action is recurring monitor that must be rescheduled if active pcmk__action_reschedule = (UINT32_C(1) << 13), // Whether action has already been processed by a recursive procedure pcmk__action_detect_loop = (UINT32_C(1) << 14), // Whether action's inputs have been de-duplicated yet pcmk__action_inputs_deduplicated = (UINT32_C(1) << 15), // Whether action can be executed on DC rather than own node pcmk__action_on_dc = (UINT32_C(1) << 16), }; /* Possible responses to a resource action failure * * The order is significant; the values are in order of increasing severity so * that they can be compared with less than and greater than. */ enum pcmk__on_fail { pcmk__on_fail_ignore, // Act as if failure didn't happen pcmk__on_fail_demote, // Demote if promotable, else stop pcmk__on_fail_restart, // Restart resource /* Fence the remote node created by the resource if fencing is enabled, * otherwise attempt to restart the resource (used internally for some * remote connection failures). 
*/ pcmk__on_fail_reset_remote, pcmk__on_fail_restart_container, // Restart resource's container pcmk__on_fail_ban, // Ban resource from current node pcmk__on_fail_block, // Treat resource as unmanaged pcmk__on_fail_stop, // Stop resource and leave stopped pcmk__on_fail_standby_node, // Put resource's node in standby pcmk__on_fail_fence_node, // Fence resource's node }; +// What resource needs before it can be recovered from a failed node +enum pcmk__requires { + pcmk__requires_nothing = 0, // Resource can be recovered immediately + pcmk__requires_quorum = 1, // Resource can be recovered if quorate + pcmk__requires_fencing = 2, // Resource can be recovered after fencing +}; + // Implementation of pcmk_action_t struct pcmk__action { int id; // Counter to identify action /* * When the controller aborts a transition graph, it sets an abort priority. * If this priority is higher, the action will still be executed anyway. * Pseudo-actions are always allowed, so this is irrelevant for them. */ int priority; pcmk_resource_t *rsc; // Resource to apply action to, if any pcmk_node_t *node; // Node to execute action on, if any xmlNode *op_entry; // Action XML configuration, if any char *task; // Action name char *uuid; // Action key char *cancel_task; // If task is "cancel", the action being cancelled char *reason; // Readable description of why action is needed uint32_t flags; // Group of enum pcmk__action_flags - enum rsc_start_requirement needs; // Prerequisite for recovery + enum pcmk__requires needs; // Prerequisite for recovery enum pcmk__on_fail on_fail; // Response to failure enum rsc_role_e fail_role; // Resource role if action fails GHashTable *meta; // Meta-attributes relevant to action GHashTable *extra; // Action-specific instance attributes /* Current count of runnable instance actions for "first" action in an * ordering dependency with pcmk__ar_min_runnable set. */ int runnable_before; /* * Number of instance actions for "first" action in an ordering dependency * with pcmk__ar_min_runnable set that must be runnable before this action * can be runnable. */ int required_runnable_before; // Actions in a relation with this one (as pcmk__related_action_t *) GList *actions_before; GList *actions_after; }; char *pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms); char *pcmk__notify_key(const char *rsc_id, const char *notify_type, const char *op_type); char *pcmk__transition_key(int transition_id, int action_id, int target_rc, const char *node); void pcmk__filter_op_for_digest(xmlNode *param_set); bool pcmk__is_fencing_action(const char *action); enum pcmk__action_type pcmk__parse_action(const char *action_name); const char *pcmk__action_text(enum pcmk__action_type action); const char *pcmk__on_fail_text(enum pcmk__on_fail on_fail); /*! 
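The severity ordering of enum pcmk__on_fail noted above means the strictest configured response can be selected with a plain integer comparison. A standalone sketch with stand-in names (only the increasing-severity ordering mirrors the real enum):

    #include <stdio.h>

    // Illustrative values; not the real pcmk__on_fail enumerators
    enum demo_on_fail { demo_ignore, demo_restart, demo_ban, demo_fence_node };

    static enum demo_on_fail worst(enum demo_on_fail a, enum demo_on_fail b)
    {
        return (a > b)? a : b;  // greater value = more severe response
    }

    int main(void)
    {
        printf("%d\n", worst(demo_restart, demo_ban) == demo_ban); // prints 1
        return 0;
    }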
* \internal * \brief Get a human-friendly action name * * \param[in] action_name Actual action name * \param[in] interval_ms Action interval (in milliseconds) * * \return Action name suitable for display */ static inline const char * pcmk__readable_action(const char *action_name, guint interval_ms) { if ((interval_ms == 0) && pcmk__str_eq(action_name, PCMK_ACTION_MONITOR, pcmk__str_none)) { return "probe"; } return action_name; } #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_ACTIONS_INTERNAL__H diff --git a/include/crm/common/resources.h b/include/crm/common/resources.h index 69a1655c4e..5436394102 100644 --- a/include/crm/common/resources.h +++ b/include/crm/common/resources.h @@ -1,117 +1,100 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_RESOURCES__H #define PCMK__CRM_COMMON_RESOURCES__H #include <stdbool.h> // bool #include <time.h> // time_t #include <libxml/tree.h> // xmlNode #include <glib.h> // gboolean, guint, GList, GHashTable #include <crm/common/roles.h> // enum rsc_role_e #include <crm/common/scheduler_types.h> // pcmk_resource_t, etc. #ifdef __cplusplus extern "C" { #endif /*! * \file * \brief Scheduler API for resources * \ingroup core */ -//!@{ -//! \deprecated Do not use - -// What resource needs before it can be recovered from a failed node -enum rsc_start_requirement { - pcmk_requires_nothing = 0, // Resource can be recovered immediately - pcmk_requires_quorum = 1, // Resource can be recovered if quorate - pcmk_requires_fencing = 2, // Resource can be recovered after fencing - -#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) - rsc_req_nothing = pcmk_requires_nothing, - rsc_req_quorum = pcmk_requires_quorum, - rsc_req_stonith = pcmk_requires_fencing, -#endif -}; -//!@} - //! Search options for resources (exact resource ID always matches) enum pe_find { //! Also match clone instance ID from resource history pcmk_rsc_match_history = (1 << 0), //! Also match anonymous clone instances by base name pcmk_rsc_match_anon_basename = (1 << 1), //! Match only clones and their instances, by either clone or instance ID pcmk_rsc_match_clone_only = (1 << 2), //! If matching by node, compare current node instead of assigned node pcmk_rsc_match_current_node = (1 << 3), //! \deprecated Do not use pe_find_inactive = (1 << 4), //! Match clone instances (even unique) by base name as well as exact ID pcmk_rsc_match_basename = (1 << 5), #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1) //! \deprecated Use pcmk_rsc_match_history instead pe_find_renamed = pcmk_rsc_match_history, //! \deprecated Use pcmk_rsc_match_anon_basename instead pe_find_anon = pcmk_rsc_match_anon_basename, //! \deprecated Use pcmk_rsc_match_clone_only instead pe_find_clone = pcmk_rsc_match_clone_only, //! \deprecated Use pcmk_rsc_match_current_node instead pe_find_current = pcmk_rsc_match_current_node, //! \deprecated Use pcmk_rsc_match_basename instead pe_find_any = pcmk_rsc_match_basename, #endif }; //! \internal Do not use typedef struct pcmk__resource_private pcmk__resource_private_t; // Implementation of pcmk_resource_t // @COMPAT Make this internal when we can break API backward compatibility //!@{ //!
\deprecated Do not use (public access will be removed in a future release) struct pe_resource_s { /* @COMPAT Once all members are moved to pcmk__resource_private_t, * we can make that the pcmk_resource_t implementation and drop this * struct altogether, leaving pcmk_resource_t as an opaque public type. */ pcmk__resource_private_t *private; // NOTE: sbd (as of at least 1.5.2) uses this //! \deprecated Call pcmk_resource_id() instead char *id; // Resource ID in configuration // NOTE: sbd (as of at least 1.5.2) uses this //! \deprecated Call pcmk_resource_is_managed() instead unsigned long long flags; // Group of enum pcmk__rsc_flags }; //!@} const char *pcmk_resource_id(const pcmk_resource_t *rsc); bool pcmk_resource_is_managed(const pcmk_resource_t *rsc); #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_RESOURCES__H diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index 3aea0c484d..a0ff9ab32d 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -1,420 +1,420 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_PENGINE_INTERNAL__H # define PCMK__CRM_PENGINE_INTERNAL__H # include # include # include # include # include # include # include # include # include # include const char *pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts); bool pe__clone_is_ordered(const pcmk_resource_t *clone); int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag); bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags); bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags); pcmk_resource_t *pe__last_group_member(const pcmk_resource_t *group); const pcmk_resource_t *pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle); int pe__clone_max(const pcmk_resource_t *clone); int pe__clone_node_max(const pcmk_resource_t *clone); int pe__clone_promoted_max(const pcmk_resource_t *clone); int pe__clone_promoted_node_max(const pcmk_resource_t *clone); void pe__create_clone_notifications(pcmk_resource_t *clone); void pe__free_clone_notification_data(pcmk_resource_t *clone); void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone, pcmk_action_t *start, pcmk_action_t *started, pcmk_action_t *stop, pcmk_action_t *stopped); pcmk_action_t *pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional, bool runnable); void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone, bool any_promoting, bool any_demoting); bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node); char *native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create, const char *name, pcmk_scheduler_t *scheduler); pcmk_node_t *native_location(const pcmk_resource_t *rsc, GList **list, int current); void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_scheduler_t *scheduler, gboolean failed); gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler); pcmk_resource_t *native_find_rsc(pcmk_resource_t *rsc, const char *id, const pcmk_node_t *node, int flags);
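A usage sketch for the accessors declared above, assuming a pcmk_resource_t obtained from a scheduler query elsewhere; this is the supported alternative to touching the deprecated struct pe_resource_s members directly (compile-only illustration, not from the patch):

    #include <stdbool.h>
    #include <stdio.h>
    #include <crm/common/resources.h>   // pcmk_resource_id(), pcmk_resource_is_managed()

    // Report a resource via the public accessors rather than struct members
    static void log_resource(const pcmk_resource_t *rsc)
    {
        printf("%s is %smanaged\n", pcmk_resource_id(rsc),
               pcmk_resource_is_managed(rsc)? "" : "un");
    }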
gboolean native_active(pcmk_resource_t *rsc, gboolean all); gboolean group_active(pcmk_resource_t *rsc, gboolean all); gboolean clone_active(pcmk_resource_t *rsc, gboolean all); gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all); gchar *pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name, const pcmk_node_t *node, uint32_t show_opts, const char *target_role, bool show_nodes); int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name, ...) G_GNUC_NULL_TERMINATED; char *pe__node_display_name(pcmk_node_t *node, bool print_detail); // Clone notifications (pe_notif.c) void pe__order_notifs_after_fencing(const pcmk_action_t *action, pcmk_resource_t *rsc, pcmk_action_t *stonith_op); // Resource output methods int pe__clone_xml(pcmk__output_t *out, va_list args); int pe__clone_default(pcmk__output_t *out, va_list args); int pe__group_xml(pcmk__output_t *out, va_list args); int pe__group_default(pcmk__output_t *out, va_list args); int pe__bundle_xml(pcmk__output_t *out, va_list args); int pe__bundle_html(pcmk__output_t *out, va_list args); int pe__bundle_text(pcmk__output_t *out, va_list args); int pe__node_html(pcmk__output_t *out, va_list args); int pe__node_text(pcmk__output_t *out, va_list args); int pe__node_xml(pcmk__output_t *out, va_list args); int pe__resource_xml(pcmk__output_t *out, va_list args); int pe__resource_html(pcmk__output_t *out, va_list args); int pe__resource_text(pcmk__output_t *out, va_list args); void native_free(pcmk_resource_t *rsc); void group_free(pcmk_resource_t *rsc); void clone_free(pcmk_resource_t *rsc); void pe__free_bundle(pcmk_resource_t *rsc); enum rsc_role_e native_resource_state(const pcmk_resource_t *rsc, gboolean current); enum rsc_role_e group_resource_state(const pcmk_resource_t *rsc, gboolean current); enum rsc_role_e clone_resource_state(const pcmk_resource_t *rsc, gboolean current); enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current); void pe__count_common(pcmk_resource_t *rsc); void pe__count_bundle(pcmk_resource_t *rsc); void common_free(pcmk_resource_t *rsc); pcmk_node_t *pe__copy_node(const pcmk_node_t *this_node); time_t get_effective_time(pcmk_scheduler_t *scheduler); /* Failure handling utilities (from failcounts.c) */ int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc, time_t *last_failure, uint32_t flags, const xmlNode *xml_op); pcmk_action_t *pe__clear_failcount(pcmk_resource_t *rsc, const pcmk_node_t *node, const char *reason, pcmk_scheduler_t *scheduler); /* Functions for finding/counting a resource's active nodes */ bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_node_t **active, unsigned int *count_all, unsigned int *count_clean); pcmk_node_t *pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count); /* Binary like operators for lists of nodes */ GHashTable *pe__node_list2table(const GList *list); pcmk_action_t *get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler); gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action, uint32_t flags); void pe__show_node_scores_as(const char *file, const char *function, int line, bool to_log, const pcmk_resource_t *rsc, const char *comment, GHashTable *nodes, pcmk_scheduler_t *scheduler); #define pe__show_node_scores(level, rsc, text, nodes, scheduler) \ pe__show_node_scores_as(__FILE__, __func__, __LINE__, \ (level), (rsc), (text), (nodes), (scheduler)) GHashTable *pcmk__unpack_action_meta(pcmk_resource_t *rsc, const 
pcmk_node_t *node, const char *action_name, guint interval_ms, const xmlNode *action_config); GHashTable *pcmk__unpack_action_rsc_params(const xmlNode *action_xml, GHashTable *node_attrs, pcmk_scheduler_t *data_set); xmlNode *pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name, guint interval_ms, bool include_disabled); -enum rsc_start_requirement pcmk__action_requires(const pcmk_resource_t *rsc, - const char *action_name); +enum pcmk__requires pcmk__action_requires(const pcmk_resource_t *rsc, + const char *action_name); enum pcmk__on_fail pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name, guint interval_ms, const char *value); enum rsc_role_e pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name, enum pcmk__on_fail on_fail, GHashTable *meta); pcmk_action_t *custom_action(pcmk_resource_t *rsc, char *key, const char *task, const pcmk_node_t *on_node, gboolean optional, pcmk_scheduler_t *scheduler); #define delete_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_DELETE, 0) #define stop_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_STOP, 0) #define reload_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_RELOAD_AGENT, 0) #define start_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_START, 0) #define promote_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_PROMOTE, 0) #define demote_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_DEMOTE, 0) #define delete_action(rsc, node, optional) \ custom_action((rsc), delete_key(rsc), PCMK_ACTION_DELETE, \ (node), (optional), (rsc)->private->scheduler) #define stop_action(rsc, node, optional) \ custom_action((rsc), stop_key(rsc), PCMK_ACTION_STOP, \ (node), (optional), (rsc)->private->scheduler) #define start_action(rsc, node, optional) \ custom_action((rsc), start_key(rsc), PCMK_ACTION_START, \ (node), (optional), (rsc)->private->scheduler) #define promote_action(rsc, node, optional) \ custom_action((rsc), promote_key(rsc), PCMK_ACTION_PROMOTE, \ (node), (optional), (rsc)->private->scheduler) #define demote_action(rsc, node, optional) \ custom_action((rsc), demote_key(rsc), PCMK_ACTION_DEMOTE, \ (node), (optional), (rsc)->private->scheduler) pcmk_action_t *find_first_action(const GList *input, const char *uuid, const char *task, const pcmk_node_t *on_node); enum pcmk__action_type get_complex_task(const pcmk_resource_t *rsc, const char *name); GList *find_actions(GList *input, const char *key, const pcmk_node_t *on_node); GList *find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node); GList *pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node, const char *task, bool require_node); extern void pe_free_action(pcmk_action_t *action); void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score, const char *tag, pcmk_scheduler_t *scheduler); extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, bool same_node_default); extern gint sort_op_by_callid(gconstpointer a, gconstpointer b); gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role); void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why); pcmk_resource_t *find_clone_instance(const pcmk_resource_t *rsc, const char *sub_id); extern void destroy_ticket(gpointer data); pcmk_ticket_t *ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler); // Resources for manipulating resource names const char *pe_base_name_end(const char *id); char *clone_strip(const char *last_rsc_id); char *clone_zero(const char *last_rsc_id); static inline 
bool pe_base_name_eq(const pcmk_resource_t *rsc, const char *id) { if (id && rsc && rsc->id) { // Number of characters in rsc->id before any clone suffix size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1; return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len); } return false; } int pe__target_rc_from_xml(const xmlNode *xml_op); gint pe__cmp_node_name(gconstpointer a, gconstpointer b); bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any); pcmk__op_digest_t *pe__calculate_digests(pcmk_resource_t *rsc, const char *task, guint *interval_ms, const pcmk_node_t *node, const xmlNode *xml_op, GHashTable *overrides, bool calc_secure, pcmk_scheduler_t *scheduler); void pe__free_digests(gpointer ptr); pcmk__op_digest_t *rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op, pcmk_node_t *node, pcmk_scheduler_t *scheduler); pcmk_action_t *pe_fence_op(pcmk_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pcmk_scheduler_t *scheduler); void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason, pcmk_action_t *dependency, pcmk_scheduler_t *scheduler); char *pe__action2reason(const pcmk_action_t *action, enum pcmk__action_flags flag); void pe_action_set_reason(pcmk_action_t *action, const char *reason, bool overwrite); void pe__add_action_expected_result(pcmk_action_t *action, int expected_result); void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags); void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags); void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag); gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref); int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options); void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, const char *reason, bool priority_delay); pcmk_node_t *pe_create_node(const char *id, const char *uname, const char *type, const char *score, pcmk_scheduler_t *scheduler); int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc, const char *name, const pcmk_node_t *node, unsigned int options); int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc, const char *name, const pcmk_node_t *node, unsigned int options); GList *pe__bundle_containers(const pcmk_resource_t *bundle); int pe__bundle_max(const pcmk_resource_t *rsc); bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle, const pcmk_node_t *node); pcmk_resource_t *pe__bundled_resource(const pcmk_resource_t *rsc); const pcmk_resource_t *pe__get_rsc_in_container(const pcmk_resource_t *instance); pcmk_resource_t *pe__first_container(const pcmk_resource_t *bundle); void pe__foreach_bundle_replica(pcmk_resource_t *bundle, bool (*fn)(pcmk__bundle_replica_t *, void *), void *user_data); void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle, bool (*fn)(const pcmk__bundle_replica_t *, void *), void *user_data); pcmk_resource_t *pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node); bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc); const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml, const char *field); bool pe__is_universal_clone(const pcmk_resource_t *rsc, const pcmk_scheduler_t *scheduler); void pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc, pcmk_node_t *node, enum pcmk__check_parameters, pcmk_scheduler_t *scheduler); void 
pe__foreach_param_check(pcmk_scheduler_t *scheduler, void (*cb)(pcmk_resource_t*, pcmk_node_t*, const xmlNode*, enum pcmk__check_parameters)); void pe__free_param_checks(pcmk_scheduler_t *scheduler); bool pe__shutdown_requested(const pcmk_node_t *node); void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler, const char *reason); /*! * \internal * \brief Register xml formatting message functions. * * \param[in,out] out Output object to register messages with */ void pe__register_messages(pcmk__output_t *out); void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name, const pe_rule_eval_data_t *rule_data, GHashTable *hash, const char *always_first, gboolean overwrite, pcmk_scheduler_t *scheduler); bool pe__resource_is_disabled(const pcmk_resource_t *rsc); void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node); GList *pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name); GList *pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name); bool pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc, const char *tag); bool pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node, const char *tag); bool pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node); bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list); GList *pe__filter_rsc_list(GList *rscs, GList *filter); GList * pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s); GList * pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s); bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node); gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc, gboolean check_parent); xmlNode *pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name); const char *pe__clone_child_id(const pcmk_resource_t *rsc); int pe__sum_node_health_scores(const pcmk_node_t *node, int base_health); int pe__node_health(pcmk_node_t *node); static inline enum pcmk__health_strategy pe__health_strategy(pcmk_scheduler_t *scheduler) { const char *strategy = pcmk__cluster_option(scheduler->config_hash, PCMK_OPT_NODE_HEALTH_STRATEGY); return pcmk__parse_health_strategy(strategy); } static inline int pe__health_score(const char *option, pcmk_scheduler_t *scheduler) { const char *value = pcmk__cluster_option(scheduler->config_hash, option); return char2score(value); } #endif diff --git a/lib/pacemaker/pcmk_sched_fencing.c b/lib/pacemaker/pcmk_sched_fencing.c index ab599ead23..b74d79a49f 100644 --- a/lib/pacemaker/pcmk_sched_fencing.c +++ b/lib/pacemaker/pcmk_sched_fencing.c @@ -1,509 +1,509 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include "libpacemaker_private.h" /*! 
* \internal * \brief Check whether a resource is known on a particular node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return TRUE if resource (or parent if an anonymous clone) is known */ static bool rsc_is_known_on(const pcmk_resource_t *rsc, const pcmk_node_t *node) { const pcmk_resource_t *parent = rsc->private->parent; if (g_hash_table_lookup(rsc->private->probed_nodes, node->private->id) != NULL) { return TRUE; } else if (pcmk__is_primitive(rsc) && pcmk__is_anonymous_clone(parent) && (g_hash_table_lookup(parent->private->probed_nodes, node->private->id) != NULL)) { /* We check only the parent, not the uber-parent, because we cannot * assume that the resource is known if it is in an anonymously cloned * group (which may be only partially known). */ return TRUE; } return FALSE; } /*! * \internal * \brief Order a resource's start and promote actions relative to fencing * * \param[in,out] rsc Resource to be ordered * \param[in,out] stonith_op Fence action */ static void order_start_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op) { pcmk_node_t *target; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; for (GList *iter = rsc->private->actions; iter != NULL; iter = iter->next) { pcmk_action_t *action = iter->data; switch (action->needs) { - case pcmk_requires_nothing: + case pcmk__requires_nothing: // Anything other than start or promote requires nothing break; - case pcmk_requires_fencing: + case pcmk__requires_fencing: order_actions(stonith_op, action, pcmk__ar_ordered); break; - case pcmk_requires_quorum: + case pcmk__requires_quorum: if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none) && (g_hash_table_lookup(rsc->private->allowed_nodes, target->private->id) != NULL) && !rsc_is_known_on(rsc, target)) { /* If we don't know the status of the resource on the node * we're about to shoot, we have to assume it may be active * there. Order the resource start after the fencing. This * is analogous to waiting for all the probes for a resource * to complete before starting it. * * The most likely explanation is that the DC died and took * its status with it. */ pcmk__rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, pcmk__node_name(target)); order_actions(stonith_op, action, pcmk__ar_ordered |pcmk__ar_unrunnable_first_blocks); } break; } } } /*! * \internal * \brief Order a resource's stop and demote actions relative to fencing * * \param[in,out] rsc Resource to be ordered * \param[in,out] stonith_op Fence action */ static void order_stop_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op) { GList *iter = NULL; GList *action_list = NULL; bool order_implicit = false; pcmk_resource_t *top = uber_parent(rsc); pcmk_action_t *parent_stop = NULL; pcmk_node_t *target; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; /* Get a list of stop actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, PCMK_ACTION_STOP, FALSE); /* If resource requires fencing, implicit actions must occur after fencing. * * Implied stops and demotes of resources running on guest nodes are always * ordered after fencing, even if the resource does not require fencing, * because guest node "fencing" is actually just a resource stop. 
*/ if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing) || pcmk__is_guest_or_bundle_node(target)) { order_implicit = true; } if (action_list && order_implicit) { parent_stop = find_first_action(top->private->actions, NULL, PCMK_ACTION_STOP, NULL); } for (iter = action_list; iter != NULL; iter = iter->next) { pcmk_action_t *action = iter->data; // The stop would never complete, so convert it into a pseudo-action. pcmk__set_action_flags(action, pcmk__action_pseudo|pcmk__action_runnable); if (order_implicit) { /* Order the stonith before the parent stop (if any). * * Also order the stonith before the resource stop, unless the * resource is inside a bundle -- that would cause a graph loop. * We can rely on the parent stop's ordering instead. * * User constraints must not order a resource in a guest node * relative to the guest node container resource. The * pcmk__ar_guest_allowed flag marks constraints as generated by the * cluster and thus immune to that check (and is irrelevant if * target is not a guest). */ if (!pcmk__is_bundled(rsc)) { order_actions(stonith_op, action, pcmk__ar_guest_allowed); } order_actions(stonith_op, parent_stop, pcmk__ar_guest_allowed); } if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { crm_notice("Stop of failed resource %s is implicit %s %s is fenced", rsc->id, (order_implicit? "after" : "because"), pcmk__node_name(target)); } else { crm_info("%s is implicit %s %s is fenced", action->uuid, (order_implicit? "after" : "because"), pcmk__node_name(target)); } if (pcmk_is_set(rsc->flags, pcmk__rsc_notify)) { pe__order_notifs_after_fencing(action, rsc, stonith_op); } #if 0 /* It might be a good idea to stop healthy resources on a node about to * be fenced, when possible. * * However, fencing must be done before a failed resource's * (pseudo-)stop action, so that could create a loop. For example, given * a group of A and B running on node N with a failed stop of B: * * fence N -> stop B (pseudo-op) -> stop A -> fence N * * The block below creates the stop A -> fence N ordering and therefore * must (at least for now) be disabled. Instead, run the block above and * treat all resources on N as B would be (i.e., as a pseudo-op after * the fencing). * * @TODO Maybe break the "A requires B" dependency in * pcmk__update_action_for_orderings() and use this block for healthy * resources instead of the above. */ crm_info("Moving healthy resource %s off %s before fencing", rsc->id, pcmk__node_name(node)); pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, strdup(PCMK_ACTION_STONITH), stonith_op, pcmk__ar_ordered, rsc->private->scheduler); #endif } g_list_free(action_list); /* Get a list of demote actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, PCMK_ACTION_DEMOTE, FALSE); for (iter = action_list; iter != NULL; iter = iter->next) { pcmk_action_t *action = iter->data; if (!(action->node->details->online) || action->node->details->unclean || pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) { pcmk__rsc_info(rsc, "Demote of failed resource %s is implicit " "after %s is fenced", rsc->id, pcmk__node_name(target)); } else { pcmk__rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, pcmk__node_name(target)); } /* The demote would never complete and is now implied by the * fencing, so convert it into a pseudo-action. 
*/ pcmk__set_action_flags(action, pcmk__action_pseudo|pcmk__action_runnable); if (pcmk__is_bundled(rsc)) { // Recovery will be ordered as usual after parent's implied stop } else if (order_implicit) { order_actions(stonith_op, action, pcmk__ar_guest_allowed|pcmk__ar_ordered); } } } g_list_free(action_list); } /*! * \internal * \brief Order resource actions properly relative to fencing * * \param[in,out] rsc Resource whose actions should be ordered * \param[in,out] stonith_op Fencing operation to be ordered against */ static void rsc_stonith_ordering(pcmk_resource_t *rsc, pcmk_action_t *stonith_op) { if (rsc->private->children != NULL) { for (GList *iter = rsc->private->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child_rsc = iter->data; rsc_stonith_ordering(child_rsc, stonith_op); } } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { pcmk__rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: " "%s", rsc->id); } else { order_start_vs_fencing(rsc, stonith_op); order_stop_vs_fencing(rsc, stonith_op); } } /*! * \internal * \brief Order all actions appropriately relative to a fencing operation * * Ensure start operations of affected resources are ordered after fencing, * imply stop and demote operations of affected resources by marking them as * pseudo-actions, etc. * * \param[in,out] stonith_op Fencing operation * \param[in,out] scheduler Scheduler data */ void pcmk__order_vs_fence(pcmk_action_t *stonith_op, pcmk_scheduler_t *scheduler) { CRM_CHECK(stonith_op && scheduler, return); for (GList *r = scheduler->resources; r != NULL; r = r->next) { rsc_stonith_ordering((pcmk_resource_t *) r->data, stonith_op); } } /*! * \internal * \brief Order an action after unfencing * * \param[in] rsc Resource that action is for * \param[in,out] node Node that action is on * \param[in,out] action Action to be ordered after unfencing * \param[in] order Ordering flags */ void pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_action_t *action, enum pcmk__action_relation_flags order) { /* When unfencing is in use, we order unfence actions before any probe or * start of resources that require unfencing, and also of fence devices. * * This might seem to violate the principle that fence devices require * only quorum. However, fence agents that unfence often don't have enough * information to even probe or start unless the node is first unfenced. */ if ((pcmk_is_set(rsc->flags, pcmk__rsc_fence_device) && pcmk_is_set(rsc->private->scheduler->flags, pcmk_sched_enable_unfencing)) || pcmk_is_set(rsc->flags, pcmk__rsc_needs_unfencing)) { /* Start with an optional ordering. Requiring unfencing would result in * the node being unfenced, and all its resources being stopped, * whenever a new resource is added -- which would be highly suboptimal. */ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, TRUE, NULL, FALSE, node->private->scheduler); order_actions(unfence, action, order); if (!pcmk__node_unfenced(node)) { // But unfencing is required if it has never been done char *reason = crm_strdup_printf("required by %s %s", rsc->id, action->task); trigger_unfencing(NULL, node, reason, NULL, node->private->scheduler); free(reason); } } } /*! 
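The unfencing decision above hinges on the #node-unfenced node attribute, which records the time of the last successful unfencing. A standalone sketch of that convention (the demo function and timestamp are illustrative; pcmk__node_unfenced() below is the real check):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    // Mirrors pcmk__node_unfenced(): "0" or an unset attribute means the
    // node has never been unfenced
    static bool demo_node_unfenced(const char *attr_value)
    {
        return (attr_value != NULL) && (strcmp(attr_value, "0") != 0);
    }

    int main(void)
    {
        // unset, explicitly zero, and unfenced at an (illustrative) timestamp
        printf("%d %d %d\n", demo_node_unfenced(NULL), demo_node_unfenced("0"),
               demo_node_unfenced("1700000000"));   // prints "0 0 1"
        return 0;
    }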
* \internal * \brief Create pseudo-op for guest node fence, and order relative to it * * \param[in,out] node Guest node to fence */ void pcmk__fence_guest(pcmk_node_t *node) { pcmk_resource_t *launcher = NULL; pcmk_action_t *stop = NULL; pcmk_action_t *stonith_op = NULL; /* The fence action is just a label; we don't do anything differently for * off vs. reboot. We specify it explicitly, rather than let it default to * cluster's default action, because we are not _initiating_ fencing -- we * are creating a pseudo-event to describe fencing that is already occurring * by other means (launcher recovery). */ const char *fence_action = PCMK_ACTION_OFF; CRM_ASSERT(node != NULL); /* Check whether guest's launcher has any explicit stop or start (the stop * may be implied by fencing of the guest's host). */ launcher = node->private->remote->private->launcher; if (launcher != NULL) { stop = find_first_action(launcher->private->actions, NULL, PCMK_ACTION_STOP, NULL); if (find_first_action(launcher->private->actions, NULL, PCMK_ACTION_START, NULL)) { fence_action = PCMK_ACTION_REBOOT; } } /* Create a fence pseudo-event, so we have an event to order actions * against, and the controller can always detect it. */ stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", FALSE, node->private->scheduler); pcmk__set_action_flags(stonith_op, pcmk__action_pseudo|pcmk__action_runnable); /* We want to imply stops/demotes after the guest is stopped, not wait until * it is restarted, so we always order pseudo-fencing after stop, not start * (even though start might be closer to what is done for a real reboot). */ if ((stop != NULL) && pcmk_is_set(stop->flags, pcmk__action_pseudo)) { pcmk_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, FALSE, node->private->scheduler); crm_info("Implying guest %s is down (action %d) after %s fencing", pcmk__node_name(node), stonith_op->id, pcmk__node_name(stop->node)); order_actions(parent_stonith_op, stonith_op, pcmk__ar_unrunnable_first_blocks |pcmk__ar_first_implies_then); } else if (stop) { order_actions(stop, stonith_op, pcmk__ar_unrunnable_first_blocks |pcmk__ar_first_implies_then); crm_info("Implying guest %s is down (action %d) " "after launcher %s is stopped (action %d)", pcmk__node_name(node), stonith_op->id, launcher->id, stop->id); } else { /* If we're fencing the guest node but there's no stop for the guest * resource, we must think the guest is already stopped. However, we may * think so because its resource history was just cleaned. To avoid * unnecessarily considering the guest node down if it's really up, * order the pseudo-fencing after any stop of the connection resource, * which will be ordered after any launcher (re-)probe. */ stop = find_first_action(node->private->remote->private->actions, NULL, PCMK_ACTION_STOP, NULL); if (stop) { order_actions(stop, stonith_op, pcmk__ar_ordered); crm_info("Implying guest %s is down (action %d) " "after connection is stopped (action %d)", pcmk__node_name(node), stonith_op->id, stop->id); } else { /* Not sure why we're fencing, but everything must already be * cleanly stopped. */ crm_info("Implying guest %s is down (action %d) ", pcmk__node_name(node), stonith_op->id); } } // Order/imply other actions relative to pseudo-fence as with real fence pcmk__order_vs_fence(stonith_op, node->private->scheduler); } /*! 
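pcmk__fence_guest() above picks one of three ordering anchors for the guest's pseudo-fence, depending on what is scheduled for the launcher. A standalone sketch of that decision (names and return strings are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    // Returns which event the guest's pseudo-fence is ordered after
    static const char *order_guest_fence_after(bool have_stop, bool stop_is_pseudo)
    {
        if (have_stop && stop_is_pseudo) {
            return "host fencing";      // launcher stop is itself implied
        } else if (have_stop) {
            return "launcher stop";
        }
        return "connection stop, if any";   // history may have been cleaned
    }

    int main(void)
    {
        printf("%s\n", order_guest_fence_after(true, false)); // "launcher stop"
        return 0;
    }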
* \internal * \brief Check whether node has already been unfenced * * \param[in] node Node to check * * \return true if node has a nonzero #node-unfenced attribute, otherwise * false (the attribute is unset or "0" if the node has never been * unfenced) */ bool pcmk__node_unfenced(const pcmk_node_t *node) { const char *unfenced = pcmk__node_attr(node, CRM_ATTR_UNFENCED, NULL, pcmk__rsc_node_current); return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches); } /*! * \internal * \brief Order a resource's start and stop relative to unfencing of a node * * \param[in,out] data Node that could be unfenced * \param[in,out] user_data Resource to order */ void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data) { pcmk_node_t *node = (pcmk_node_t *) data; pcmk_resource_t *rsc = (pcmk_resource_t *) user_data; pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, true, NULL, false, rsc->private->scheduler); crm_debug("Ordering any stops of %s before %s, and any starts after", rsc->id, unfence->uuid); /* * It would be more efficient to order clone resources once, * rather than order each instance, but ordering the instance * allows us to avoid unnecessary dependencies that might conflict * with user constraints. * * @TODO: This constraint can still produce a transition loop if the * resource has a stop scheduled on the node being unfenced, and * there is a user ordering constraint to start some other resource * (which will be ordered after the unfence) before stopping this * resource. An example is "start some slow-starting cloned service * before stopping an associated virtual IP that may be moving to * it": * stop this -> unfencing -> start that -> stop this */ pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, strdup(unfence->uuid), unfence, pcmk__ar_ordered|pcmk__ar_if_on_same_node, rsc->private->scheduler); pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence, rsc, start_key(rsc), NULL, pcmk__ar_first_implies_same_node_then |pcmk__ar_if_on_same_node, rsc->private->scheduler); } diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c index c9f4fbc539..366eaa679d 100644 --- a/lib/pengine/pe_actions.c +++ b/lib/pengine/pe_actions.c @@ -1,1809 +1,1809 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include "pe_status_private.h" static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj, guint interval_ms); static void add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action) { if (scheduler->singletons == NULL) { scheduler->singletons = pcmk__strkey_table(NULL, NULL); } g_hash_table_insert(scheduler->singletons, action->uuid, action); } static pcmk_action_t * lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid) { if (scheduler->singletons == NULL) { return NULL; } return g_hash_table_lookup(scheduler->singletons, action_uuid); } /*!
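The singleton table above deduplicates resource-free actions by keying on the action UUID. A minimal GLib sketch of the same idiom (the UUID string is illustrative; the real code allocates the table via pcmk__strkey_table()):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        // Same shape as scheduler->singletons: string-keyed, no free functions
        GHashTable *singletons = g_hash_table_new(g_str_hash, g_str_equal);

        g_hash_table_insert(singletons, (gpointer) "stonith-up",
                            (gpointer) "action-3");
        printf("%s\n",
               (const char *) g_hash_table_lookup(singletons, "stonith-up"));
        g_hash_table_destroy(singletons);
        return 0;
    }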
* \internal * \brief Find an existing action that matches arguments * * \param[in] key Action key to match * \param[in] rsc Resource to match (if any) * \param[in] node Node to match (if any) * \param[in] scheduler Scheduler data * * \return Existing action that matches arguments (or NULL if none) */ static pcmk_action_t * find_existing_action(const char *key, const pcmk_resource_t *rsc, const pcmk_node_t *node, const pcmk_scheduler_t *scheduler) { /* When rsc is NULL, it would be quicker to check scheduler->singletons, * but checking all scheduler->actions takes the node into account. */ GList *actions = (rsc == NULL)? scheduler->actions : rsc->private->actions; GList *matches = find_actions(actions, key, node); pcmk_action_t *action = NULL; if (matches == NULL) { return NULL; } CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches)); action = matches->data; g_list_free(matches); return action; } /*! * \internal * \brief Find the XML configuration corresponding to a specific action key * * \param[in] rsc Resource to find action configuration for * \param[in] key "RSC_ACTION_INTERVAL" of action to find * \param[in] include_disabled If false, do not return disabled actions * * \return XML configuration of desired action if any, otherwise NULL */ static xmlNode * find_exact_action_config(const pcmk_resource_t *rsc, const char *action_name, guint interval_ms, bool include_disabled) { for (xmlNode *operation = pcmk__xe_first_child(rsc->private->ops_xml, PCMK_XE_OP, NULL, NULL); operation != NULL; operation = pcmk__xe_next_same(operation)) { bool enabled = false; const char *config_name = NULL; const char *interval_spec = NULL; guint tmp_ms = 0U; // @TODO This does not consider meta-attributes, rules, defaults, etc. if (!include_disabled && (pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED, &enabled) == pcmk_rc_ok) && !enabled) { continue; } interval_spec = crm_element_value(operation, PCMK_META_INTERVAL); pcmk_parse_interval_spec(interval_spec, &tmp_ms); if (tmp_ms != interval_ms) { continue; } config_name = crm_element_value(operation, PCMK_XA_NAME); if (pcmk__str_eq(action_name, config_name, pcmk__str_none)) { return operation; } } return NULL; } /*! * \internal * \brief Find the XML configuration of a resource action * * \param[in] rsc Resource to find action configuration for * \param[in] action_name Action name to search for * \param[in] interval_ms Action interval (in milliseconds) to search for * \param[in] include_disabled If false, do not return disabled actions * * \return XML configuration of desired action if any, otherwise NULL */ xmlNode * pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name, guint interval_ms, bool include_disabled) { xmlNode *action_config = NULL; // Try requested action first action_config = find_exact_action_config(rsc, action_name, interval_ms, include_disabled); // For migrate_to and migrate_from actions, retry with "migrate" // @TODO This should be either documented or deprecated if ((action_config == NULL) && pcmk__str_any_of(action_name, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, NULL)) { action_config = find_exact_action_config(rsc, "migrate", 0, include_disabled); } return action_config; } /*! 
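find_exact_action_config() above matches operations by interval only after normalizing the configured spec to milliseconds. A usage sketch of that conversion, assuming pcmk_parse_interval_spec() is available from libcrmcommon's public headers:

    #include <stdio.h>
    #include <glib.h>               // guint
    #include <crm/common/util.h>    // pcmk_parse_interval_spec() (assumed location)

    int main(void)
    {
        guint interval_ms = 0U;

        // "10s" is a typical <op> interval spec in the CIB
        pcmk_parse_interval_spec("10s", &interval_ms);
        printf("%u\n", interval_ms);    // expected: 10000
        return 0;
    }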
* \internal * \brief Create a new action object * * \param[in] key Action key * \param[in] task Action name * \param[in,out] rsc Resource that action is for (if any) * \param[in] node Node that action is on (if any) * \param[in] optional Whether action should be considered optional * \param[in,out] scheduler Scheduler data * * \return Newly allocated action * \note This function takes ownership of \p key. It is the caller's * responsibility to free the return value with pe_free_action(). */ static pcmk_action_t * new_action(char *key, const char *task, pcmk_resource_t *rsc, const pcmk_node_t *node, bool optional, pcmk_scheduler_t *scheduler) { pcmk_action_t *action = pcmk__assert_alloc(1, sizeof(pcmk_action_t)); action->rsc = rsc; action->task = pcmk__str_copy(task); action->uuid = key; if (node) { action->node = pe__copy_node(node); } if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) { // Resource history deletion for a node can be done on the DC pcmk__set_action_flags(action, pcmk__action_on_dc); } pcmk__set_action_flags(action, pcmk__action_runnable); if (optional) { pcmk__set_action_flags(action, pcmk__action_optional); } else { pcmk__clear_action_flags(action, pcmk__action_optional); } if (rsc == NULL) { action->meta = pcmk__strkey_table(free, free); } else { guint interval_ms = 0; parse_op_key(key, NULL, NULL, &interval_ms); action->op_entry = pcmk__find_action_config(rsc, task, interval_ms, true); /* If the given key is for one of the many notification pseudo-actions * (pre_notify_promote, etc.), the actual action name is "notify" */ if ((action->op_entry == NULL) && (strstr(key, "_notify_") != NULL)) { action->op_entry = find_exact_action_config(rsc, PCMK_ACTION_NOTIFY, 0, true); } unpack_operation(action, action->op_entry, interval_ms); } pcmk__rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s", (optional? "optional" : "required"), scheduler->action_id, key, task, ((rsc == NULL)? "no resource" : rsc->id), pcmk__node_name(node)); action->id = scheduler->action_id++; scheduler->actions = g_list_prepend(scheduler->actions, action); if (rsc == NULL) { add_singleton(scheduler, action); } else { rsc->private->actions = g_list_prepend(rsc->private->actions, action); } return action; } /*! * \internal * \brief Unpack a resource's action-specific instance parameters * * \param[in] action_xml XML of action's configuration in CIB (if any) * \param[in,out] node_attrs Table of node attributes (for rule evaluation) * \param[in,out] scheduler Cluster working set (for rule evaluation) * * \return Newly allocated hash table of action-specific instance parameters */ GHashTable * pcmk__unpack_action_rsc_params(const xmlNode *action_xml, GHashTable *node_attrs, pcmk_scheduler_t *scheduler) { GHashTable *params = pcmk__strkey_table(free, free); pe_rule_eval_data_t rule_data = { .node_hash = node_attrs, .now = scheduler->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; pe__unpack_dataset_nvpairs(action_xml, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data, params, NULL, FALSE, scheduler); return params; } /*! 
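new_action() above recovers the interval from the action key via parse_op_key(). A usage sketch of the same call, assuming parse_op_key() comes from libcrmcommon's public utility header (the key string is illustrative):

    #include <stdio.h>
    #include <glib.h>               // guint, gboolean
    #include <crm/common/util.h>    // parse_op_key() (assumed location)

    int main(void)
    {
        guint interval_ms = 0U;

        // NULL out-parameters skip the resource ID and action name
        if (parse_op_key("vip_monitor_10000", NULL, NULL, &interval_ms)) {
            printf("%u\n", interval_ms);    // expected: 10000
        }
        return 0;
    }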
* \internal * \brief Update an action's optional flag * * \param[in,out] action Action to update * \param[in] optional Requested optional status */ static void update_action_optional(pcmk_action_t *action, gboolean optional) { // Force a non-recurring action to be optional if its resource is unmanaged if ((action->rsc != NULL) && (action->node != NULL) && !pcmk_is_set(action->flags, pcmk__action_pseudo) && !pcmk_is_set(action->rsc->flags, pcmk__rsc_managed) && (g_hash_table_lookup(action->meta, PCMK_META_INTERVAL) == NULL)) { pcmk__rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)", action->uuid, pcmk__node_name(action->node), action->rsc->id); pcmk__set_action_flags(action, pcmk__action_optional); // We shouldn't clear runnable here because ... something // Otherwise require the action if requested } else if (!optional) { pcmk__clear_action_flags(action, pcmk__action_optional); } } static enum pe_quorum_policy effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { enum pe_quorum_policy policy = scheduler->no_quorum_policy; if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) { policy = pcmk_no_quorum_ignore; } else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) { switch (rsc->private->orig_role) { case pcmk_role_promoted: case pcmk_role_unpromoted: if (rsc->private->next_role > pcmk_role_unpromoted) { pe__set_next_role(rsc, pcmk_role_unpromoted, PCMK_OPT_NO_QUORUM_POLICY "=demote"); } policy = pcmk_no_quorum_ignore; break; default: policy = pcmk_no_quorum_stop; break; } } return policy; } /*! * \internal * \brief Update a resource action's runnable flag * * \param[in,out] action Action to update * \param[in,out] scheduler Scheduler data * * \note This may also schedule fencing if a stop is unrunnable. */ static void update_resource_action_runnable(pcmk_action_t *action, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = action->rsc; if (pcmk_is_set(action->flags, pcmk__action_pseudo)) { return; } if (action->node == NULL) { pcmk__rsc_trace(rsc, "%s is unrunnable (unallocated)", action->uuid); pcmk__clear_action_flags(action, pcmk__action_runnable); } else if (!pcmk_is_set(action->flags, pcmk__action_on_dc) && !(action->node->details->online) && (!pcmk__is_guest_or_bundle_node(action->node) || pcmk_is_set(action->node->private->flags, pcmk__node_remote_reset))) { pcmk__clear_action_flags(action, pcmk__action_runnable); do_crm_log(LOG_WARNING, "%s on %s is unrunnable (node is offline)", action->uuid, pcmk__node_name(action->node)); if (pcmk_is_set(rsc->flags, pcmk__rsc_managed) && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei) && !(action->node->details->unclean)) { pe_fence_node(scheduler, action->node, "stop is unrunnable", false); } } else if (!pcmk_is_set(action->flags, pcmk__action_on_dc) && action->node->details->pending) { pcmk__clear_action_flags(action, pcmk__action_runnable); do_crm_log(LOG_WARNING, "Action %s on %s is unrunnable (node is pending)", action->uuid, pcmk__node_name(action->node)); - } else if (action->needs == pcmk_requires_nothing) { + } else if (action->needs == pcmk__requires_nothing) { pe_action_set_reason(action, NULL, TRUE); if (pcmk__is_guest_or_bundle_node(action->node) && !pe_can_fence(scheduler, action->node)) { /* An action that requires nothing usually does not require any * fencing in order to be runnable. However, there is an exception: * such an action cannot be completed if it is on a guest node whose * host is unclean and cannot be fenced. 
*/ pcmk__rsc_debug(rsc, "%s on %s is unrunnable " "(node's host cannot be fenced)", action->uuid, pcmk__node_name(action->node)); pcmk__clear_action_flags(action, pcmk__action_runnable); } else { pcmk__rsc_trace(rsc, "%s on %s does not require fencing or quorum", action->uuid, pcmk__node_name(action->node)); pcmk__set_action_flags(action, pcmk__action_runnable); } } else { switch (effective_quorum_policy(rsc, scheduler)) { case pcmk_no_quorum_stop: pcmk__rsc_debug(rsc, "%s on %s is unrunnable (no quorum)", action->uuid, pcmk__node_name(action->node)); pcmk__clear_action_flags(action, pcmk__action_runnable); pe_action_set_reason(action, "no quorum", true); break; case pcmk_no_quorum_freeze: if (!rsc->private->fns->active(rsc, TRUE) || (rsc->private->next_role > rsc->private->orig_role)) { pcmk__rsc_debug(rsc, "%s on %s is unrunnable (no quorum)", action->uuid, pcmk__node_name(action->node)); pcmk__clear_action_flags(action, pcmk__action_runnable); pe_action_set_reason(action, "quorum freeze", true); } break; default: //pe_action_set_reason(action, NULL, TRUE); pcmk__set_action_flags(action, pcmk__action_runnable); break; } } } static bool valid_stop_on_fail(const char *value) { return !pcmk__strcase_any_of(value, PCMK_VALUE_STANDBY, PCMK_VALUE_DEMOTE, PCMK_VALUE_STOP, NULL); } /*! * \internal * \brief Validate (and possibly reset) resource action's on_fail meta-attribute * * \param[in] rsc Resource that action is for * \param[in] action_name Action name * \param[in] action_config Action configuration XML from CIB (if any) * \param[in,out] meta Table of action meta-attributes */ static void validate_on_fail(const pcmk_resource_t *rsc, const char *action_name, const xmlNode *action_config, GHashTable *meta) { const char *name = NULL; const char *role = NULL; const char *interval_spec = NULL; const char *value = g_hash_table_lookup(meta, PCMK_META_ON_FAIL); guint interval_ms = 0U; // Stop actions can only use certain on-fail values if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none) && !valid_stop_on_fail(value)) { pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for %s stop " "action to default value because '%s' is not " "allowed for stop", rsc->id, value); g_hash_table_remove(meta, PCMK_META_ON_FAIL); return; } /* Demote actions default on-fail to the on-fail value for the first * recurring monitor for the promoted role (if any). */ if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none) && (value == NULL)) { /* @TODO This does not consider promote options set in a meta-attribute * block (which may have rules that need to be evaluated) rather than * XML properties. 
*/ for (xmlNode *operation = pcmk__xe_first_child(rsc->private->ops_xml, PCMK_XE_OP, NULL, NULL); operation != NULL; operation = pcmk__xe_next_same(operation)) { bool enabled = false; const char *promote_on_fail = NULL; /* We only care about explicit on-fail (if promote uses default, so * can demote) */ promote_on_fail = crm_element_value(operation, PCMK_META_ON_FAIL); if (promote_on_fail == NULL) { continue; } // We only care about recurring monitors for the promoted role name = crm_element_value(operation, PCMK_XA_NAME); role = crm_element_value(operation, PCMK_XA_ROLE); if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none) || !pcmk__strcase_any_of(role, PCMK_ROLE_PROMOTED, PCMK__ROLE_PROMOTED_LEGACY, NULL)) { continue; } interval_spec = crm_element_value(operation, PCMK_META_INTERVAL); pcmk_parse_interval_spec(interval_spec, &interval_ms); if (interval_ms == 0U) { continue; } // We only care about enabled monitors if ((pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED, &enabled) == pcmk_rc_ok) && !enabled) { continue; } /* Demote actions can't default to * PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE */ if (pcmk__str_eq(promote_on_fail, PCMK_VALUE_DEMOTE, pcmk__str_casei)) { continue; } // Use value from first applicable promote action found pcmk__insert_dup(meta, PCMK_META_ON_FAIL, promote_on_fail); } return; } if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none) && !pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) { pcmk__insert_dup(meta, PCMK_META_ON_FAIL, PCMK_VALUE_IGNORE); return; } // PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE is allowed only for certain actions if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) { name = crm_element_value(action_config, PCMK_XA_NAME); role = crm_element_value(action_config, PCMK_XA_ROLE); interval_spec = crm_element_value(action_config, PCMK_META_INTERVAL); pcmk_parse_interval_spec(interval_spec, &interval_ms); if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none) && ((interval_ms == 0U) || !pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none) || !pcmk__strcase_any_of(role, PCMK_ROLE_PROMOTED, PCMK__ROLE_PROMOTED_LEGACY, NULL))) { pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for %s %s " "action to default value because 'demote' is not " "allowed for it", rsc->id, name); g_hash_table_remove(meta, PCMK_META_ON_FAIL); return; } } } static int unpack_timeout(const char *value) { long long timeout_ms = crm_get_msec(value); if (timeout_ms <= 0) { timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS; } return (int) QB_MIN(timeout_ms, INT_MAX); } // true if value contains valid, non-NULL interval origin for recurring op static bool unpack_interval_origin(const char *value, const xmlNode *xml_obj, guint interval_ms, const crm_time_t *now, long long *start_delay) { long long result = 0; guint interval_sec = interval_ms / 1000; crm_time_t *origin = NULL; // Ignore unspecified values and non-recurring operations if ((value == NULL) || (interval_ms == 0) || (now == NULL)) { return false; } // Parse interval origin from text origin = crm_time_new(value); if (origin == NULL) { pcmk__config_err("Ignoring '" PCMK_META_INTERVAL_ORIGIN "' for " "operation '%s' because '%s' is not valid", pcmk__s(pcmk__xe_id(xml_obj), "(missing ID)"), value); return false; } // Get seconds since origin (negative if origin is in the future) result = crm_time_get_seconds(now) - crm_time_get_seconds(origin); crm_time_free(origin); // Calculate seconds from closest interval to now result = result % interval_sec; // Calculate seconds remaining until 
next interval result = ((result <= 0)? 0 : interval_sec) - result; crm_info("Calculated a start delay of %llds for operation '%s'", result, pcmk__s(pcmk__xe_id(xml_obj), "(unspecified)")); if (start_delay != NULL) { *start_delay = result * 1000; // milliseconds } return true; } static int unpack_start_delay(const char *value, GHashTable *meta) { long long start_delay_ms = 0; if (value == NULL) { return 0; } start_delay_ms = crm_get_msec(value); start_delay_ms = QB_MIN(start_delay_ms, INT_MAX); if (start_delay_ms < 0) { start_delay_ms = 0; } if (meta != NULL) { g_hash_table_replace(meta, strdup(PCMK_META_START_DELAY), pcmk__itoa(start_delay_ms)); } return (int) start_delay_ms; } /*! * \internal * \brief Find a resource's most frequent recurring monitor * * \param[in] rsc Resource to check * * \return Operation XML configured for most frequent recurring monitor for * \p rsc (if any) */ static xmlNode * most_frequent_monitor(const pcmk_resource_t *rsc) { guint min_interval_ms = G_MAXUINT; xmlNode *op = NULL; for (xmlNode *operation = pcmk__xe_first_child(rsc->private->ops_xml, PCMK_XE_OP, NULL, NULL); operation != NULL; operation = pcmk__xe_next_same(operation)) { bool enabled = false; guint interval_ms = 0U; const char *interval_spec = crm_element_value(operation, PCMK_META_INTERVAL); // We only care about enabled recurring monitors if (!pcmk__str_eq(crm_element_value(operation, PCMK_XA_NAME), PCMK_ACTION_MONITOR, pcmk__str_none)) { continue; } pcmk_parse_interval_spec(interval_spec, &interval_ms); if (interval_ms == 0U) { continue; } // @TODO This does not consider meta-attributes, rules, defaults, etc. if ((pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED, &enabled) == pcmk_rc_ok) && !enabled) { continue; } if (interval_ms < min_interval_ms) { min_interval_ms = interval_ms; op = operation; } } return op; } /*! * \internal * \brief Unpack action meta-attributes * * \param[in,out] rsc Resource that action is for * \param[in] node Node that action is on * \param[in] action_name Action name * \param[in] interval_ms Action interval (in milliseconds) * \param[in] action_config Action XML configuration from CIB (if any) * * Unpack a resource action's meta-attributes (normalizing the interval, * timeout, and start delay values as integer milliseconds) from its CIB XML * configuration (including defaults). * * \return Newly allocated hash table with normalized action meta-attributes */ GHashTable * pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node, const char *action_name, guint interval_ms, const xmlNode *action_config) { GHashTable *meta = NULL; const char *timeout_spec = NULL; const char *str = NULL; pe_rsc_eval_data_t rsc_rule_data = { .standard = crm_element_value(rsc->private->xml, PCMK_XA_CLASS), .provider = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER), .agent = crm_element_value(rsc->private->xml, PCMK_XA_TYPE), }; pe_op_eval_data_t op_rule_data = { .op_name = action_name, .interval = interval_ms, }; pe_rule_eval_data_t rule_data = { /* @COMPAT Support for node attribute expressions in operation * meta-attributes (whether in the operation configuration or operation * defaults) is deprecated. When we can break behavioral backward * compatibility, drop this line. */ .node_hash = (node == NULL)? 
NULL : node->private->attrs, .now = rsc->private->scheduler->now, .match_data = NULL, .rsc_data = &rsc_rule_data, .op_data = &op_rule_data, }; meta = pcmk__strkey_table(free, free); // Cluster-wide <op_defaults> meta-attributes pe__unpack_dataset_nvpairs(rsc->private->scheduler->op_defaults, PCMK_XE_META_ATTRIBUTES, &rule_data, meta, NULL, FALSE, rsc->private->scheduler); // Derive default timeout for probes from recurring monitor timeouts if (pcmk_is_probe(action_name, interval_ms)) { xmlNode *min_interval_mon = most_frequent_monitor(rsc); if (min_interval_mon != NULL) { /* @TODO This does not consider timeouts set in * PCMK_XE_META_ATTRIBUTES blocks (which may also have rules that * need to be evaluated). */ timeout_spec = crm_element_value(min_interval_mon, PCMK_META_TIMEOUT); if (timeout_spec != NULL) { pcmk__rsc_trace(rsc, "Setting default timeout for %s probe to " "most frequent monitor's timeout '%s'", rsc->id, timeout_spec); pcmk__insert_dup(meta, PCMK_META_TIMEOUT, timeout_spec); } } } if (action_config != NULL) { // <op> takes precedence over defaults pe__unpack_dataset_nvpairs(action_config, PCMK_XE_META_ATTRIBUTES, &rule_data, meta, NULL, TRUE, rsc->private->scheduler); /* Anything set as an XML property has highest precedence. * This ensures we use the name and interval from the <op> tag. * (See below for the only exception, fence device start/probe timeout.) */ for (xmlAttrPtr attr = action_config->properties; attr != NULL; attr = attr->next) { pcmk__insert_dup(meta, (const char *) attr->name, pcmk__xml_attr_value(attr)); } } g_hash_table_remove(meta, PCMK_XA_ID); // Normalize interval to milliseconds if (interval_ms > 0) { g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_INTERVAL), crm_strdup_printf("%u", interval_ms)); } else { g_hash_table_remove(meta, PCMK_META_INTERVAL); } /* Timeout order of precedence (highest to lowest): * 1. pcmk_monitor_timeout resource parameter (only for starts and probes * when rsc has pcmk_ra_cap_fence_params; this gets used for recurring * monitors via the executor instead) * 2. timeout configured in <op> (with <op timeout> taking precedence over * <op> meta-attributes) * 3. timeout configured in <op_defaults> * 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS */ // Check for pcmk_monitor_timeout if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard), pcmk_ra_cap_fence_params) && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none) || pcmk_is_probe(action_name, interval_ms))) { GHashTable *params = pe_rsc_params(rsc, node, rsc->private->scheduler); timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout"); if (timeout_spec != NULL) { pcmk__rsc_trace(rsc, "Setting timeout for %s %s to " "pcmk_monitor_timeout (%s)", rsc->id, action_name, timeout_spec); pcmk__insert_dup(meta, PCMK_META_TIMEOUT, timeout_spec); } } // Normalize timeout to positive milliseconds timeout_spec = g_hash_table_lookup(meta, PCMK_META_TIMEOUT); g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_TIMEOUT), pcmk__itoa(unpack_timeout(timeout_spec))); // Ensure on-fail has a valid value validate_on_fail(rsc, action_name, action_config, meta); // Normalize PCMK_META_START_DELAY str = g_hash_table_lookup(meta, PCMK_META_START_DELAY); if (str != NULL) { unpack_start_delay(str, meta); } else { long long start_delay = 0; str = g_hash_table_lookup(meta, PCMK_META_INTERVAL_ORIGIN); if (unpack_interval_origin(str, action_config, interval_ms, rsc->private->scheduler->now, &start_delay)) { g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_START_DELAY), crm_strdup_printf("%lld", start_delay)); } } return meta; } /*!
* \internal * \brief Determine an action's quorum and fencing dependency * * \param[in] rsc Resource that action is for * \param[in] action_name Name of action being unpacked * * \return Quorum and fencing dependency appropriate to action */ -enum rsc_start_requirement +enum pcmk__requires pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name) { const char *value = NULL; - enum rsc_start_requirement requires = pcmk_requires_nothing; + enum pcmk__requires requires = pcmk__requires_nothing; CRM_CHECK((rsc != NULL) && (action_name != NULL), return requires); if (!pcmk__strcase_any_of(action_name, PCMK_ACTION_START, PCMK_ACTION_PROMOTE, NULL)) { value = "nothing (not start or promote)"; } else if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) { - requires = pcmk_requires_fencing; + requires = pcmk__requires_fencing; value = "fencing"; } else if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_quorum)) { - requires = pcmk_requires_quorum; + requires = pcmk__requires_quorum; value = "quorum"; } else { value = "nothing"; } pcmk__rsc_trace(rsc, "%s of %s requires %s", action_name, rsc->id, value); return requires; } /*! * \internal * \brief Parse action failure response from a user-provided string * * \param[in] rsc Resource that action is for * \param[in] action_name Name of action * \param[in] interval_ms Action interval (in milliseconds) * \param[in] value User-provided configuration value for on-fail * * \return Action failure response parsed from \p value */ enum pcmk__on_fail pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name, guint interval_ms, const char *value) { const char *desc = NULL; bool needs_remote_reset = false; enum pcmk__on_fail on_fail = pcmk__on_fail_ignore; const pcmk_scheduler_t *scheduler = NULL; // There's no enum value for unknown or invalid, so assert CRM_ASSERT((rsc != NULL) && (action_name != NULL)); scheduler = rsc->private->scheduler; if (value == NULL) { // Use default } else if (pcmk__str_eq(value, PCMK_VALUE_BLOCK, pcmk__str_casei)) { on_fail = pcmk__on_fail_block; desc = "block"; } else if (pcmk__str_eq(value, PCMK_VALUE_FENCE, pcmk__str_casei)) { if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { on_fail = pcmk__on_fail_fence_node; desc = "node fencing"; } else { pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for " "%s of %s to 'stop' because 'fence' is not " "valid when fencing is disabled", action_name, rsc->id); on_fail = pcmk__on_fail_stop; desc = "stop resource"; } } else if (pcmk__str_eq(value, PCMK_VALUE_STANDBY, pcmk__str_casei)) { on_fail = pcmk__on_fail_standby_node; desc = "node standby"; } else if (pcmk__strcase_any_of(value, PCMK_VALUE_IGNORE, PCMK_VALUE_NOTHING, NULL)) { desc = "ignore"; } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) { on_fail = pcmk__on_fail_ban; desc = "force migration"; } else if (pcmk__str_eq(value, PCMK_VALUE_STOP, pcmk__str_casei)) { on_fail = pcmk__on_fail_stop; desc = "stop resource"; } else if (pcmk__str_eq(value, PCMK_VALUE_RESTART, pcmk__str_casei)) { on_fail = pcmk__on_fail_restart; desc = "restart (and possibly migrate)"; } else if (pcmk__str_eq(value, PCMK_VALUE_RESTART_CONTAINER, pcmk__str_casei)) { if (rsc->private->launcher == NULL) { pcmk__rsc_debug(rsc, "Using default " PCMK_META_ON_FAIL " for %s " "of %s because it does not have a launcher", action_name, rsc->id); } else { on_fail = pcmk__on_fail_restart_container; desc = "restart container (and possibly migrate)"; } } else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) { on_fail =
pcmk__on_fail_demote; desc = "demote instance"; } else { pcmk__config_err("Using default '" PCMK_META_ON_FAIL "' for " "%s of %s because '%s' is not valid", action_name, rsc->id, value); } /* Remote node connections are handled specially. Failures that result * in dropping an active connection must result in fencing. The only * failures that don't are probes and starts. The user can explicitly set * PCMK_META_ON_FAIL=PCMK_VALUE_FENCE to fence after start failures. */ if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection) && pcmk__is_remote_node(pcmk_find_node(scheduler, rsc->id)) && !pcmk_is_probe(action_name, interval_ms) && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) { needs_remote_reset = true; if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { desc = NULL; // Force default for unmanaged connections } } if (desc != NULL) { // Explicit value used, default not needed } else if (rsc->private->launcher != NULL) { on_fail = pcmk__on_fail_restart_container; desc = "restart container (and possibly migrate) (default)"; } else if (needs_remote_reset) { if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { desc = "fence remote node (default)"; } else { desc = "recover remote node connection (default)"; } on_fail = pcmk__on_fail_reset_remote; } else { on_fail = pcmk__on_fail_stop; desc = "stop unmanaged remote node (enforcing default)"; } } else if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)) { if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { on_fail = pcmk__on_fail_fence_node; desc = "resource fence (default)"; } else { on_fail = pcmk__on_fail_block; desc = "resource block (default)"; } } else { on_fail = pcmk__on_fail_restart; desc = "restart (and possibly migrate) (default)"; } pcmk__rsc_trace(rsc, "Failure handling for %s-interval %s of %s: %s", pcmk__readable_interval(interval_ms), action_name, rsc->id, desc); return on_fail; } /*! 
* \internal * \brief Determine a resource's role after failure of an action * * \param[in] rsc Resource that action is for * \param[in] action_name Action name * \param[in] on_fail Failure handling for action * \param[in] meta Unpacked action meta-attributes * * \return Resource role that results from failure of action */ enum rsc_role_e pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name, enum pcmk__on_fail on_fail, GHashTable *meta) { const char *value = NULL; enum rsc_role_e role = pcmk_role_unknown; // Set default for role after failure specially in certain circumstances switch (on_fail) { case pcmk__on_fail_stop: role = pcmk_role_stopped; break; case pcmk__on_fail_reset_remote: if (rsc->private->remote_reconnect_ms != 0U) { role = pcmk_role_stopped; } break; default: break; } // @COMPAT Check for explicitly configured role (deprecated) value = g_hash_table_lookup(meta, PCMK__META_ROLE_AFTER_FAILURE); if (value != NULL) { pcmk__warn_once(pcmk__wo_role_after, "Support for " PCMK__META_ROLE_AFTER_FAILURE " is " "deprecated and will be removed in a future release"); if (role == pcmk_role_unknown) { role = pcmk_parse_role(value); if (role == pcmk_role_unknown) { pcmk__config_err("Ignoring invalid value %s for " PCMK__META_ROLE_AFTER_FAILURE, value); } } } if (role == pcmk_role_unknown) { // Use default if (pcmk__str_eq(action_name, PCMK_ACTION_PROMOTE, pcmk__str_none)) { role = pcmk_role_unpromoted; } else { role = pcmk_role_started; } } pcmk__rsc_trace(rsc, "Role after %s %s failure is: %s", rsc->id, action_name, pcmk_role_text(role)); return role; } /*! * \internal * \brief Unpack action configuration * * Unpack a resource action's meta-attributes (normalizing the interval, * timeout, and start delay values as integer milliseconds), requirements, and * failure policy from its CIB XML configuration (including defaults). * * \param[in,out] action Resource action to unpack into * \param[in] xml_obj Action configuration XML (NULL for defaults only) * \param[in] interval_ms How frequently to perform the operation */ static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj, guint interval_ms) { const char *value = NULL; action->meta = pcmk__unpack_action_meta(action->rsc, action->node, action->task, interval_ms, xml_obj); action->needs = pcmk__action_requires(action->rsc, action->task); value = g_hash_table_lookup(action->meta, PCMK_META_ON_FAIL); action->on_fail = pcmk__parse_on_fail(action->rsc, action->task, interval_ms, value); action->fail_role = pcmk__role_after_failure(action->rsc, action->task, action->on_fail, action->meta); } /*! * \brief Create or update an action object * * \param[in,out] rsc Resource that action is for (if any) * \param[in,out] key Action key (must be non-NULL) * \param[in] task Action name (must be non-NULL) * \param[in] on_node Node that action is on (if any) * \param[in] optional Whether action should be considered optional * \param[in,out] scheduler Scheduler data * * \return Action object corresponding to arguments (guaranteed not to be * \c NULL) * \note This function takes ownership of (and might free) \p key, and * \p scheduler takes ownership of the returned action (the caller should * not free it). 
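* \note As a hedged usage sketch (argument values assumed for illustration): a required start action could be requested as custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), PCMK_ACTION_START, node, FALSE, scheduler); mirroring the call pattern used elsewhere in this file.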
*/ pcmk_action_t * custom_action(pcmk_resource_t *rsc, char *key, const char *task, const pcmk_node_t *on_node, gboolean optional, pcmk_scheduler_t *scheduler) { pcmk_action_t *action = NULL; CRM_ASSERT((key != NULL) && (task != NULL) && (scheduler != NULL)); action = find_existing_action(key, rsc, on_node, scheduler); if (action == NULL) { action = new_action(key, task, rsc, on_node, optional, scheduler); } else { free(key); } update_action_optional(action, optional); if (rsc != NULL) { /* An action can be initially created with a NULL node, and later have * the node added via find_existing_action() (above) -> find_actions(). * That is why the extra parameters are unpacked here rather than in * new_action(). */ if ((action->node != NULL) && (action->op_entry != NULL) && !pcmk_is_set(action->flags, pcmk__action_attrs_evaluated)) { GHashTable *attrs = action->node->private->attrs; if (action->extra != NULL) { g_hash_table_destroy(action->extra); } action->extra = pcmk__unpack_action_rsc_params(action->op_entry, attrs, scheduler); pcmk__set_action_flags(action, pcmk__action_attrs_evaluated); } update_resource_action_runnable(action, scheduler); } if (action->extra == NULL) { action->extra = pcmk__strkey_table(free, free); } return action; } pcmk_action_t * get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler) { pcmk_action_t *op = lookup_singleton(scheduler, name); if (op == NULL) { op = custom_action(NULL, strdup(name), name, NULL, TRUE, scheduler); pcmk__set_action_flags(op, pcmk__action_pseudo|pcmk__action_runnable); } return op; } static GList * find_unfencing_devices(GList *candidates, GList *matches) { for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *candidate = gIter->data; if (candidate->private->children != NULL) { matches = find_unfencing_devices(candidate->private->children, matches); } else if (!pcmk_is_set(candidate->flags, pcmk__rsc_fence_device)) { continue; } else if (pcmk_is_set(candidate->flags, pcmk__rsc_needs_unfencing)) { matches = g_list_prepend(matches, candidate); } else if (pcmk__str_eq(g_hash_table_lookup(candidate->private->meta, PCMK_STONITH_PROVIDES), PCMK_VALUE_UNFENCING, pcmk__str_casei)) { matches = g_list_prepend(matches, candidate); } } return matches; } static int node_priority_fencing_delay(const pcmk_node_t *node, const pcmk_scheduler_t *scheduler) { int member_count = 0; int online_count = 0; int top_priority = 0; int lowest_priority = 0; GList *gIter = NULL; // PCMK_OPT_PRIORITY_FENCING_DELAY is disabled if (scheduler->priority_fencing_delay <= 0) { return 0; } /* No need to request a delay if the fencing target is not a normal cluster * member, for example if it's a remote node or a guest node. */ if (node->private->variant != pcmk__node_variant_cluster) { return 0; } // No need to request a delay if the fencing target is in our partition if (node->details->online) { return 0; } for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *n = gIter->data; if (n->private->variant != pcmk__node_variant_cluster) { continue; } member_count ++; if (n->details->online) { online_count++; } if (member_count == 1 || n->private->priority > top_priority) { top_priority = n->private->priority; } if (member_count == 1 || n->private->priority < lowest_priority) { lowest_priority = n->private->priority; } } // No need to delay if we have more than half of the cluster members if (online_count > member_count / 2) { return 0; } /* All the nodes have equal priority. 
* Any configured corresponding `pcmk_delay_base/max` will be applied. */ if (lowest_priority == top_priority) { return 0; } if (node->private->priority < top_priority) { return 0; } return scheduler->priority_fencing_delay; } pcmk_action_t * pe_fence_op(pcmk_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pcmk_scheduler_t *scheduler) { char *op_key = NULL; pcmk_action_t *stonith_op = NULL; if(op == NULL) { op = scheduler->stonith_action; } op_key = crm_strdup_printf("%s-%s-%s", PCMK_ACTION_STONITH, node->private->name, op); stonith_op = lookup_singleton(scheduler, op_key); if(stonith_op == NULL) { stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node, TRUE, scheduler); pcmk__insert_meta(stonith_op, PCMK__META_ON_NODE, node->private->name); pcmk__insert_meta(stonith_op, PCMK__META_ON_NODE_UUID, node->private->id); pcmk__insert_meta(stonith_op, PCMK__META_STONITH_ACTION, op); if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) { /* Extra work to detect device changes */ GString *digests_all = g_string_sized_new(1024); GString *digests_secure = g_string_sized_new(1024); GList *matches = find_unfencing_devices(scheduler->resources, NULL); for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *match = gIter->data; const char *agent = g_hash_table_lookup(match->private->meta, PCMK_XA_TYPE); pcmk__op_digest_t *data = NULL; data = pe__compare_fencing_digest(match, agent, node, scheduler); if (data->rc == pcmk__digest_mismatch) { optional = FALSE; crm_notice("Unfencing node %s because the definition of " "%s changed", pcmk__node_name(node), match->id); if (!pcmk__is_daemon && scheduler->priv != NULL) { pcmk__output_t *out = scheduler->priv; out->info(out, "notice: Unfencing node %s because the " "definition of %s changed", pcmk__node_name(node), match->id); } } pcmk__g_strcat(digests_all, match->id, ":", agent, ":", data->digest_all_calc, ",", NULL); pcmk__g_strcat(digests_secure, match->id, ":", agent, ":", data->digest_secure_calc, ",", NULL); } pcmk__insert_dup(stonith_op->meta, PCMK__META_DIGESTS_ALL, digests_all->str); g_string_free(digests_all, TRUE); pcmk__insert_dup(stonith_op->meta, PCMK__META_DIGESTS_SECURE, digests_secure->str); g_string_free(digests_secure, TRUE); } } else { free(op_key); } if (scheduler->priority_fencing_delay > 0 /* It's a suitable case where PCMK_OPT_PRIORITY_FENCING_DELAY * applies. At least add PCMK_OPT_PRIORITY_FENCING_DELAY field as * an indicator. */ && (priority_delay /* The priority delay needs to be recalculated if this function has * been called by schedule_fencing_and_shutdowns() after node * priority has already been calculated by native_add_running(). */ || g_hash_table_lookup(stonith_op->meta, PCMK_OPT_PRIORITY_FENCING_DELAY) != NULL)) { /* Add PCMK_OPT_PRIORITY_FENCING_DELAY to the fencing op even if * it's 0 for the targeting node. So that it takes precedence over * any possible `pcmk_delay_base/max`. 
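* For example (hypothetical values): even if pcmk_delay_base=5s is configured for the fence device, an explicit PCMK_OPT_PRIORITY_FENCING_DELAY of 0 on this op indicates that no extra delay should apply to this target.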
*/ char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, scheduler)); g_hash_table_insert(stonith_op->meta, strdup(PCMK_OPT_PRIORITY_FENCING_DELAY), delay_s); } if(optional == FALSE && pe_can_fence(scheduler, node)) { pcmk__clear_action_flags(stonith_op, pcmk__action_optional); pe_action_set_reason(stonith_op, reason, false); } else if(reason && stonith_op->reason == NULL) { stonith_op->reason = strdup(reason); } return stonith_op; } void pe_free_action(pcmk_action_t *action) { if (action == NULL) { return; } g_list_free_full(action->actions_before, free); g_list_free_full(action->actions_after, free); if (action->extra) { g_hash_table_destroy(action->extra); } if (action->meta) { g_hash_table_destroy(action->meta); } free(action->cancel_task); free(action->reason); free(action->task); free(action->uuid); free(action->node); free(action); } enum pcmk__action_type get_complex_task(const pcmk_resource_t *rsc, const char *name) { enum pcmk__action_type task = pcmk__parse_action(name); if (pcmk__is_primitive(rsc)) { switch (task) { case pcmk__action_stopped: case pcmk__action_started: case pcmk__action_demoted: case pcmk__action_promoted: crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); --task; break; default: break; } } return task; } /*! * \internal * \brief Find first matching action in a list * * \param[in] input List of actions to search * \param[in] uuid If not NULL, action must have this UUID * \param[in] task If not NULL, action must have this action name * \param[in] on_node If not NULL, action must be on this node * * \return First action in list that matches criteria, or NULL if none */ pcmk_action_t * find_first_action(const GList *input, const char *uuid, const char *task, const pcmk_node_t *on_node) { CRM_CHECK(uuid || task, return NULL); for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) { pcmk_action_t *action = (pcmk_action_t *) gIter->data; if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) { continue; } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) { continue; } else if (on_node == NULL) { return action; } else if (action->node == NULL) { continue; } else if (pcmk__same_node(on_node, action->node)) { return action; } } return NULL; } GList * find_actions(GList *input, const char *key, const pcmk_node_t *on_node) { GList *gIter = input; GList *result = NULL; CRM_CHECK(key != NULL, return NULL); for (; gIter != NULL; gIter = gIter->next) { pcmk_action_t *action = (pcmk_action_t *) gIter->data; if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) { continue; } else if (on_node == NULL) { crm_trace("Action %s matches (ignoring node)", key); result = g_list_prepend(result, action); } else if (action->node == NULL) { crm_trace("Action %s matches (unallocated, assigning to %s)", key, pcmk__node_name(on_node)); action->node = pe__copy_node(on_node); result = g_list_prepend(result, action); } else if (pcmk__same_node(on_node, action->node)) { crm_trace("Action %s on %s matches", key, pcmk__node_name(on_node)); result = g_list_prepend(result, action); } } return result; } GList * find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node) { GList *result = NULL; CRM_CHECK(key != NULL, return NULL); if (on_node == NULL) { return NULL; } for (GList *gIter = input; gIter != NULL; gIter = gIter->next) { pcmk_action_t *action = (pcmk_action_t *) gIter->data; if ((action->node != NULL) && pcmk__str_eq(key, action->uuid, pcmk__str_casei) && pcmk__same_node(on_node, 
action->node)) { crm_trace("Action %s on %s matches", key, pcmk__node_name(on_node)); result = g_list_prepend(result, action); } } return result; } /*! * \brief Find all actions of given type for a resource * * \param[in] rsc Resource to search * \param[in] node Find only actions scheduled on this node * \param[in] task Action name to search for * \param[in] require_node If TRUE, NULL node or action node will not match * * \return List of actions found (or NULL if none) * \note If node is not NULL and require_node is FALSE, matching actions * without a node will be assigned to node. */ GList * pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node, const char *task, bool require_node) { GList *result = NULL; char *key = pcmk__op_key(rsc->id, task, 0); if (require_node) { result = find_actions_exact(rsc->private->actions, key, node); } else { result = find_actions(rsc->private->actions, key, node); } free(key); return result; } /*! * \internal * \brief Create an action reason string based on the action itself * * \param[in] action Action to create reason string for * \param[in] flag Action flag that was cleared * * \return Newly allocated string suitable for use as action reason * \note It is the caller's responsibility to free() the result. */ char * pe__action2reason(const pcmk_action_t *action, enum pcmk__action_flags flag) { const char *change = NULL; switch (flag) { case pcmk__action_runnable: change = "unrunnable"; break; case pcmk__action_migratable: change = "unmigrateable"; break; case pcmk__action_optional: change = "required"; break; default: // Bug: caller passed unsupported flag CRM_CHECK(change != NULL, change = ""); break; } return crm_strdup_printf("%s%s%s %s", change, (action->rsc == NULL)? "" : " ", (action->rsc == NULL)? "" : action->rsc->id, action->task); } void pe_action_set_reason(pcmk_action_t *action, const char *reason, bool overwrite) { if (action->reason != NULL && overwrite) { pcmk__rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'", action->uuid, action->reason, pcmk__s(reason, "(none)")); } else if (action->reason == NULL) { pcmk__rsc_trace(action->rsc, "Set %s reason to '%s'", action->uuid, pcmk__s(reason, "(none)")); } else { // crm_assert(action->reason != NULL && !overwrite); return; } pcmk__str_update(&action->reason, reason); } /*! 
* \internal * \brief Create an action to clear a resource's history from CIB * * \param[in,out] rsc Resource to clear * \param[in] node Node to clear history on */ void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node) { CRM_ASSERT((rsc != NULL) && (node != NULL)); custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0), PCMK_ACTION_LRM_DELETE, node, FALSE, rsc->private->scheduler); } #define sort_return(an_int, why) do { \ free(a_uuid); \ free(b_uuid); \ crm_trace("%s (%d) %c %s (%d) : %s", \ a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ b_xml_id, b_call_id, why); \ return an_int; \ } while(0) int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, bool same_node_default) { int a_call_id = -1; int b_call_id = -1; char *a_uuid = NULL; char *b_uuid = NULL; const char *a_xml_id = crm_element_value(xml_a, PCMK_XA_ID); const char *b_xml_id = crm_element_value(xml_b, PCMK_XA_ID); const char *a_node = crm_element_value(xml_a, PCMK__META_ON_NODE); const char *b_node = crm_element_value(xml_b, PCMK__META_ON_NODE); bool same_node = true; /* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via * 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c). * * In case that any of the PCMK__XE_LRM_RSC_OP entries doesn't have on_node * attribute, we need to explicitly tell whether the two operations are on * the same node. */ if (a_node == NULL || b_node == NULL) { same_node = same_node_default; } else { same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei); } if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) { /* We have duplicate PCMK__XE_LRM_RSC_OP entries in the status * section which is unlikely to be a good thing * - we can handle it easily enough, but we need to get * to the bottom of why it's happening. */ pcmk__config_err("Duplicate " PCMK__XE_LRM_RSC_OP " entries named %s", a_xml_id); sort_return(0, "duplicate"); } crm_element_value_int(xml_a, PCMK__XA_CALL_ID, &a_call_id); crm_element_value_int(xml_b, PCMK__XA_CALL_ID, &b_call_id); if (a_call_id == -1 && b_call_id == -1) { /* both are pending ops so it doesn't matter since * stops are never pending */ sort_return(0, "pending"); } else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) { sort_return(-1, "call id"); } else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) { sort_return(1, "call id"); } else if (a_call_id >= 0 && b_call_id >= 0 && (!same_node || a_call_id == b_call_id)) { /* The op and last_failed_op are the same. Order on * PCMK_XA_LAST_RC_CHANGE. */ time_t last_a = -1; time_t last_b = -1; crm_element_value_epoch(xml_a, PCMK_XA_LAST_RC_CHANGE, &last_a); crm_element_value_epoch(xml_b, PCMK_XA_LAST_RC_CHANGE, &last_b); crm_trace("rc-change: %lld vs %lld", (long long) last_a, (long long) last_b); if (last_a >= 0 && last_a < last_b) { sort_return(-1, "rc-change"); } else if (last_b >= 0 && last_a > last_b) { sort_return(1, "rc-change"); } sort_return(0, "rc-change"); } else { /* One of the inputs is a pending operation. * Attempt to use PCMK__XA_TRANSITION_MAGIC to determine its age relative * to the other. 
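* * (Hedged note: the transition magic string encodes, among other things, the transition ID and the UUID of the scheduler run that created the operation; decode_transition_magic() below extracts both. A transition ID of -1 marks a shutdown operation, while a call ID of -1 marks a pending one.)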
*/ int a_id = -1; int b_id = -1; const char *a_magic = crm_element_value(xml_a, PCMK__XA_TRANSITION_MAGIC); const char *b_magic = crm_element_value(xml_b, PCMK__XA_TRANSITION_MAGIC); CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic a"); } if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL, NULL)) { sort_return(0, "bad magic b"); } /* try to determine the relative age of the operation... * some pending operations (e.g. a start) may have been superseded * by a subsequent stop * * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last */ if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) { /* * some of the logic in here may be redundant... * * if the UUID from the TE doesn't match then one better * be a pending operation. * pending operations don't survive between elections and joins * because we query the LRM directly */ if (b_call_id == -1) { sort_return(-1, "transition + call"); } else if (a_call_id == -1) { sort_return(1, "transition + call"); } } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { sort_return(-1, "transition"); } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { sort_return(1, "transition"); } } /* we should never end up here */ CRM_CHECK(FALSE, sort_return(0, "default")); } gint sort_op_by_callid(gconstpointer a, gconstpointer b) { const xmlNode *xml_a = a; const xmlNode *xml_b = b; return pe__is_newer_op(xml_a, xml_b, true); } /*! * \internal * \brief Create a new pseudo-action for a resource * * \param[in,out] rsc Resource to create action for * \param[in] task Action name * \param[in] optional Whether action should be considered optional * \param[in] runnable Whether action should be considered runnable * * \return New action object corresponding to arguments */ pcmk_action_t * pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional, bool runnable) { pcmk_action_t *action = NULL; CRM_ASSERT((rsc != NULL) && (task != NULL)); action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL, optional, rsc->private->scheduler); pcmk__set_action_flags(action, pcmk__action_pseudo); if (runnable) { pcmk__set_action_flags(action, pcmk__action_runnable); } return action; } /*! * \internal * \brief Add the expected result to an action * * \param[in,out] action Action to add expected result to * \param[in] expected_result Expected result to add * * \note This is more efficient than calling pcmk__insert_meta(). */ void pe__add_action_expected_result(pcmk_action_t *action, int expected_result) { CRM_ASSERT((action != NULL) && (action->meta != NULL)); g_hash_table_insert(action->meta, pcmk__str_copy(PCMK__META_OP_TARGET_RC), pcmk__itoa(expected_result)); }
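/* A hedged usage sketch (the caller and expected value are illustrative, not defined here): probe scheduling could record that a probe is expected to find the resource inactive, e.g. pe__add_action_expected_result(probe, PCMK_OCF_NOT_RUNNING); which stores the expected rc under PCMK__META_OP_TARGET_RC in the probe's meta table as above. */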