diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index 33635eb436..1325c6d04b 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -1,618 +1,631 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PE_INTERNAL__H # define PE_INTERNAL__H # include # include # include # include # include # include # include enum pe__clone_flags { // Whether instances should be started sequentially pe__clone_ordered = (1 << 0), // Whether promotion scores have been added pe__clone_promotion_added = (1 << 1), // Whether promotion constraints have been added pe__clone_promotion_constrained = (1 << 2), }; bool pe__clone_is_ordered(pe_resource_t *clone); int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag); # define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "", fmt, ##args) # define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "", fmt, ##args) # define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "", fmt, ##args) # define pe_err(fmt...) do { \ was_processing_error = TRUE; \ pcmk__config_err(fmt); \ } while (0) # define pe_warn(fmt...) do { \ was_processing_warning = TRUE; \ pcmk__config_warn(fmt); \ } while (0) # define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); } # define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); } #define pe__set_working_set_flags(working_set, flags_to_set) do { \ (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Working set", crm_system_name, \ (working_set)->flags, (flags_to_set), #flags_to_set); \ } while (0) #define pe__clear_working_set_flags(working_set, flags_to_clear) do { \ (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Working set", crm_system_name, \ (working_set)->flags, (flags_to_clear), #flags_to_clear); \ } while (0) #define pe__set_resource_flags(resource, flags_to_set) do { \ (resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \ (flags_to_set), #flags_to_set); \ } while (0) #define pe__clear_resource_flags(resource, flags_to_clear) do { \ (resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \ (flags_to_clear), #flags_to_clear); \ } while (0) #define pe__set_action_flags(action, flags_to_set) do { \ (action)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Action", (action)->uuid, \ (action)->flags, \ (flags_to_set), \ #flags_to_set); \ } while (0) #define pe__clear_action_flags(action, flags_to_clear) do { \ (action)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Action", (action)->uuid, \ (action)->flags, \ (flags_to_clear), \ #flags_to_clear); \ } while (0) #define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \ action_flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Action", action_name, \ (action_flags), \ (flags_to_set), #flags_to_set); \ } while (0) #define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \ action_flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Action", action_name, \ (action_flags), \ (flags_to_clear), \ #flags_to_clear); \ } 
while (0) #define pe__set_action_flags_as(function, line, action, flags_to_set) do { \ (action)->flags = pcmk__set_flags_as((function), (line), \ LOG_TRACE, \ "Action", (action)->uuid, \ (action)->flags, \ (flags_to_set), \ #flags_to_set); \ } while (0) #define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \ (action)->flags = pcmk__clear_flags_as((function), (line), \ LOG_TRACE, \ "Action", (action)->uuid, \ (action)->flags, \ (flags_to_clear), \ #flags_to_clear); \ } while (0) #define pe__set_order_flags(order_flags, flags_to_set) do { \ order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Ordering", "constraint", \ order_flags, (flags_to_set), \ #flags_to_set); \ } while (0) #define pe__clear_order_flags(order_flags, flags_to_clear) do { \ order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "Ordering", "constraint", \ order_flags, (flags_to_clear), \ #flags_to_clear); \ } while (0) #define pe__set_graph_flags(graph_flags, gr_action, flags_to_set) do { \ graph_flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Graph", \ (gr_action)->uuid, graph_flags, \ (flags_to_set), #flags_to_set); \ } while (0) #define pe__clear_graph_flags(graph_flags, gr_action, flags_to_clear) do { \ graph_flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Graph", \ (gr_action)->uuid, graph_flags, \ (flags_to_clear), #flags_to_clear); \ } while (0) // Some warnings we don't want to print every transition enum pe_warn_once_e { pe_wo_blind = (1 << 0), pe_wo_restart_type = (1 << 1), pe_wo_role_after = (1 << 2), pe_wo_poweroff = (1 << 3), pe_wo_require_all = (1 << 4), pe_wo_order_score = (1 << 5), pe_wo_neg_threshold = (1 << 6), pe_wo_remove_after = (1 << 7), pe_wo_ping_node = (1 << 8), }; extern uint32_t pe_wo; #define pe_warn_once(pe_wo_bit, fmt...) 
do { \ if (!pcmk_is_set(pe_wo, pe_wo_bit)) { \ if (pe_wo_bit == pe_wo_blind) { \ crm_warn(fmt); \ } else { \ pe_warn(fmt); \ } \ pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Warn-once", "logging", pe_wo, \ (pe_wo_bit), #pe_wo_bit); \ } \ } while (0); typedef struct pe__location_constraint_s { char *id; // Constraint XML ID pe_resource_t *rsc_lh; // Resource being located enum rsc_role_e role_filter; // Role to locate enum pe_discover_e discover_mode; // Resource discovery GList *node_list_rh; // List of pe_node_t* } pe__location_t; typedef struct pe__order_constraint_s { int id; enum pe_ordering type; void *lh_opaque; pe_resource_t *lh_rsc; pe_action_t *lh_action; char *lh_action_task; void *rh_opaque; pe_resource_t *rh_rsc; pe_action_t *rh_action; char *rh_action_task; } pe__ordering_t; typedef struct notify_data_s { GSList *keys; // Environment variable name/value pairs const char *action; pe_action_t *pre; pe_action_t *post; pe_action_t *pre_done; pe_action_t *post_done; GList *active; /* notify_entry_t* */ GList *inactive; /* notify_entry_t* */ GList *start; /* notify_entry_t* */ GList *stop; /* notify_entry_t* */ GList *demote; /* notify_entry_t* */ GList *promote; /* notify_entry_t* */ GList *promoted; /* notify_entry_t* */ GList *unpromoted; /* notify_entry_t* */ GHashTable *allowed_nodes; } notify_data_t; int pe__clone_promoted_max(pe_resource_t *clone); int pe__clone_promoted_node_max(pe_resource_t *clone); +pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, + bool optional, bool runnable); + bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node); void add_hash_param(GHashTable * hash, const char *name, const char *value); char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name, pe_working_set_t * data_set); pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current); void pe_metadata(pcmk__output_t *out); void verify_pe_options(GHashTable * options); void common_update_score(pe_resource_t * rsc, const char *id, int score); void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed); gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set); gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set); gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set); gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set); pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node, int flags); gboolean native_active(pe_resource_t * rsc, gboolean all); gboolean group_active(pe_resource_t * rsc, gboolean all); gboolean clone_active(pe_resource_t * rsc, gboolean all); gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all); void native_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data); void group_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data); void clone_print(pe_resource_t * rsc, const char *pre_text, long options, void *print_data); void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options, void *print_data); gchar * pcmk__native_output_string(pe_resource_t *rsc, const char *name, pe_node_t *node, uint32_t show_opts, const char *target_role, bool show_nodes); int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name , size_t pairs_count, ...); char *pe__node_display_name(pe_node_t *node, bool print_detail); + +// Clone 
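The pe_warn_once() macro above suppresses repeat warnings by recording which ones have already fired in the pe_wo bitmask. A minimal, self-contained sketch of the same idea, using plain fprintf() instead of Pacemaker's pe_warn()/pcmk__set_flags_as(); the names warn_once_flags, warn_once() and the example bits are illustrative only, not part of the real API:

#include <stdint.h>
#include <stdio.h>

/* Bits identifying warnings that should be logged at most once per run */
enum warn_once_bits {
    WO_DEPRECATED_OPT = (1 << 0),
    WO_NEG_THRESHOLD  = (1 << 1),
};

static uint32_t warn_once_flags = 0;

/* Log the message only the first time its bit is seen, then set the bit */
#define warn_once(bit, fmt, ...) do {                                   \
        if ((warn_once_flags & (bit)) == 0) {                           \
            fprintf(stderr, "warning: " fmt "\n", ##__VA_ARGS__);       \
            warn_once_flags |= (bit);                                   \
        }                                                               \
    } while (0)

int main(void)
{
    for (int i = 0; i < 3; i++) {
        // Prints once, even though evaluated three times
        warn_once(WO_DEPRECATED_OPT, "option %s is deprecated", "require-all");
    }
    return 0;
}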
notifications (pe_notif.c) +void pe__create_notifications(pe_resource_t *rsc, notify_data_t *n_data); +notify_data_t *pe__clone_notif_pseudo_ops(pe_resource_t *rsc, const char *task, + pe_action_t *action, + pe_action_t *complete); +void pe__free_notification_data(notify_data_t *n_data); +void pe__order_notifs_after_fencing(pe_action_t *action, pe_resource_t *rsc, + pe_action_t *stonith_op); + + static inline const char * pe__rsc_bool_str(pe_resource_t *rsc, uint64_t rsc_flag) { return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag)); } int pe__clone_xml(pcmk__output_t *out, va_list args); int pe__clone_default(pcmk__output_t *out, va_list args); int pe__group_xml(pcmk__output_t *out, va_list args); int pe__group_default(pcmk__output_t *out, va_list args); int pe__bundle_xml(pcmk__output_t *out, va_list args); int pe__bundle_html(pcmk__output_t *out, va_list args); int pe__bundle_text(pcmk__output_t *out, va_list args); int pe__node_html(pcmk__output_t *out, va_list args); int pe__node_text(pcmk__output_t *out, va_list args); int pe__node_xml(pcmk__output_t *out, va_list args); int pe__resource_xml(pcmk__output_t *out, va_list args); int pe__resource_html(pcmk__output_t *out, va_list args); int pe__resource_text(pcmk__output_t *out, va_list args); void native_free(pe_resource_t * rsc); void group_free(pe_resource_t * rsc); void clone_free(pe_resource_t * rsc); void pe__free_bundle(pe_resource_t *rsc); enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current); enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current); enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current); enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current); void pe__count_common(pe_resource_t *rsc); void pe__count_bundle(pe_resource_t *rsc); gboolean common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, pe_resource_t * parent, pe_working_set_t * data_set); void common_free(pe_resource_t * rsc); pe_node_t *pe__copy_node(const pe_node_t *this_node); extern time_t get_effective_time(pe_working_set_t * data_set); /* Failure handling utilities (from failcounts.c) */ // bit flags for fail count handling options enum pe_fc_flags_e { pe_fc_default = (1 << 0), pe_fc_effective = (1 << 1), // don't count expired failures pe_fc_fillers = (1 << 2), // if container, include filler failures in count }; int pe_get_failcount(pe_node_t *node, pe_resource_t *rsc, time_t *last_failure, uint32_t flags, xmlNode *xml_op, pe_working_set_t *data_set); pe_action_t *pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node, const char *reason, pe_working_set_t *data_set); /* Functions for finding/counting a resource's active nodes */ pe_node_t *pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean); pe_node_t *pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count); static inline pe_node_t * pe__current_node(const pe_resource_t *rsc) { return pe__find_active_on(rsc, NULL, NULL); } /* Binary like operators for lists of nodes */ extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores); GHashTable *pe__node_list2table(GList *list); static inline gpointer pe_hash_table_lookup(GHashTable * hash, gconstpointer key) { if (hash) { return g_hash_table_lookup(hash, key); } return NULL; } extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set); extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering 
order); /* Printing functions for debug */ extern void print_str_str(gpointer key, gpointer value, gpointer user_data); extern void pe__output_node(pe_node_t * node, gboolean details, pcmk__output_t *out); void pe__show_node_weights_as(const char *file, const char *function, int line, bool to_log, pe_resource_t *rsc, const char *comment, GHashTable *nodes, pe_working_set_t *data_set); #define pe__show_node_weights(level, rsc, text, nodes, data_set) \ pe__show_node_weights_as(__FILE__, __func__, __LINE__, \ (level), (rsc), (text), (nodes), (data_set)) extern gint sort_rsc_priority(gconstpointer a, gconstpointer b); extern xmlNode *find_rsc_op_entry(pe_resource_t * rsc, const char *key); extern pe_action_t *custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node, gboolean optional, gboolean foo, pe_working_set_t * data_set); # define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0) # define delete_action(rsc, node, optional) custom_action( \ rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \ optional, TRUE, data_set); # define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0) # define stopped_action(rsc, node, optional) custom_action( \ rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \ optional, TRUE, data_set); # define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0) # define stop_action(rsc, node, optional) custom_action( \ rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \ optional, TRUE, data_set); # define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0) # define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0) # define start_action(rsc, node, optional) custom_action( \ rsc, start_key(rsc), CRMD_ACTION_START, node, \ optional, TRUE, data_set) # define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0) # define started_action(rsc, node, optional) custom_action( \ rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \ optional, TRUE, data_set) # define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0) # define promote_action(rsc, node, optional) custom_action( \ rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \ optional, TRUE, data_set) # define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0) # define promoted_action(rsc, node, optional) custom_action( \ rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \ optional, TRUE, data_set) # define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0) # define demote_action(rsc, node, optional) custom_action( \ rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \ optional, TRUE, data_set) # define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0) # define demoted_action(rsc, node, optional) custom_action( \ rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \ optional, TRUE, data_set) extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set); extern pe_action_t *find_first_action(GList *input, const char *uuid, const char *task, pe_node_t * on_node); extern enum action_tasks get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic); extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node); GList *find_actions_exact(GList *input, const char *key, const pe_node_t *on_node); -extern GList *find_recurring_actions(GList *input, pe_node_t * not_on_node); GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node); extern void pe_free_action(pe_action_t * 
action); extern void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag, pe_working_set_t * data_set); extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, bool same_node_default); extern gint sort_op_by_callid(gconstpointer a, gconstpointer b); extern gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e *role); void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why); extern pe_resource_t *find_clone_instance(pe_resource_t * rsc, const char *sub_id, pe_working_set_t * data_set); extern void destroy_ticket(gpointer data); extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set); // Resources for manipulating resource names const char *pe_base_name_end(const char *id); char *clone_strip(const char *last_rsc_id); char *clone_zero(const char *last_rsc_id); static inline bool pe_base_name_eq(pe_resource_t *rsc, const char *id) { if (id && rsc && rsc->id) { // Number of characters in rsc->id before any clone suffix size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1; return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len); } return false; } int pe__target_rc_from_xml(xmlNode *xml_op); gint sort_node_uname(gconstpointer a, gconstpointer b); bool is_set_recursive(pe_resource_t * rsc, long long flag, bool any); enum rsc_digest_cmp_val { /*! Digests are the same */ RSC_DIGEST_MATCH = 0, /*! Params that require a restart changed */ RSC_DIGEST_RESTART, /*! Some parameter changed. */ RSC_DIGEST_ALL, /*! rsc op didn't have a digest associated with it, so * it is unknown if parameters changed or not. */ RSC_DIGEST_UNKNOWN, }; typedef struct op_digest_cache_s { enum rsc_digest_cmp_val rc; xmlNode *params_all; xmlNode *params_secure; xmlNode *params_restart; char *digest_all_calc; char *digest_secure_calc; char *digest_restart_calc; } op_digest_cache_t; op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms, pe_node_t *node, xmlNode *xml_op, GHashTable *overrides, bool calc_secure, pe_working_set_t *data_set); void pe__free_digests(gpointer ptr); op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * node, pe_working_set_t * data_set); pe_action_t *pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set); void trigger_unfencing( pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set); char *pe__action2reason(pe_action_t *action, enum pe_action_flags flag); void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite); void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags); void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags); void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag); gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref); void print_rscs_brief(GList *rsc_list, const char * pre_text, long options, void * print_data, gboolean print_all); int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options); void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay); pe_node_t *pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t * data_set); void common_print(pe_resource_t * rsc, const char *pre_text, const char *name, pe_node_t 
*node, long options, void *print_data); int pe__common_output_text(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, unsigned int options); int pe__common_output_html(pcmk__output_t *out, pe_resource_t * rsc, const char *name, pe_node_t *node, unsigned int options); pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node); bool pe__bundle_needs_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set); const char *pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set, xmlNode *xml, const char *field); const char *pe_node_attribute_calculated(const pe_node_t *node, const char *name, const pe_resource_t *rsc); const char *pe_node_attribute_raw(pe_node_t *node, const char *name); bool pe__is_universal_clone(pe_resource_t *rsc, pe_working_set_t *data_set); void pe__add_param_check(xmlNode *rsc_op, pe_resource_t *rsc, pe_node_t *node, enum pe_check_parameters, pe_working_set_t *data_set); void pe__foreach_param_check(pe_working_set_t *data_set, void (*cb)(pe_resource_t*, pe_node_t*, xmlNode*, enum pe_check_parameters, pe_working_set_t*)); void pe__free_param_checks(pe_working_set_t *data_set); bool pe__shutdown_requested(pe_node_t *node); void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set); /*! * \internal * \brief Register xml formatting message functions. */ void pe__register_messages(pcmk__output_t *out); void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, pe_rule_eval_data_t *rule_data, GHashTable *hash, const char *always_first, gboolean overwrite, pe_working_set_t *data_set); bool pe__resource_is_disabled(pe_resource_t *rsc); pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set); GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name); GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name); bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag); bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag); bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list); GList *pe__filter_rsc_list(GList *rscs, GList *filter); GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s); GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s); bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node); gboolean pe__bundle_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent); gboolean pe__clone_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent); gboolean pe__group_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent); gboolean pe__native_is_filtered(pe_resource_t *rsc, GList *only_rsc, gboolean check_parent); xmlNode *pe__failed_probe_for_rsc(pe_resource_t *rsc, const char *name); const char *pe__clone_child_id(pe_resource_t *rsc); int pe__sum_node_health_scores(const pe_node_t *node, int base_health); int pe__node_health(pe_node_t *node); static inline enum pcmk__health_strategy pe__health_strategy(pe_working_set_t *data_set) { return pcmk__parse_health_strategy(pe_pref(data_set->config_hash, PCMK__OPT_NODE_HEALTH_STRATEGY)); } static inline int pe__health_score(const char *option, pe_working_set_t *data_set) { return char2score(pe_pref(data_set->config_hash, option)); } #endif diff --git a/lib/pacemaker/Makefile.am b/lib/pacemaker/Makefile.am index 3164f44c85..94591ffadc 100644 --- a/lib/pacemaker/Makefile.am +++ 
b/lib/pacemaker/Makefile.am @@ -1,65 +1,64 @@ # # Copyright 2004-2022 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/mk/common.mk AM_CPPFLAGS += -I$(top_builddir) -I$(top_srcdir) noinst_HEADERS = libpacemaker_private.h ## libraries lib_LTLIBRARIES = libpacemaker.la ## SOURCES libpacemaker_la_LDFLAGS = -version-info 5:0:4 libpacemaker_la_CFLAGS = $(CFLAGS_HARDENED_LIB) libpacemaker_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) libpacemaker_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/lrmd/liblrmd.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(top_builddir)/lib/services/libcrmservice.la \ $(top_builddir)/lib/common/libcrmcommon.la # -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version # Use += rather than backlashed continuation lines for parsing by bumplibs libpacemaker_la_SOURCES = libpacemaker_la_SOURCES += pcmk_acl.c libpacemaker_la_SOURCES += pcmk_cluster_queries.c libpacemaker_la_SOURCES += pcmk_fence.c libpacemaker_la_SOURCES += pcmk_graph_consumer.c libpacemaker_la_SOURCES += pcmk_graph_logging.c libpacemaker_la_SOURCES += pcmk_graph_producer.c libpacemaker_la_SOURCES += pcmk_injections.c libpacemaker_la_SOURCES += pcmk_output.c libpacemaker_la_SOURCES += pcmk_resource.c libpacemaker_la_SOURCES += pcmk_sched_actions.c libpacemaker_la_SOURCES += pcmk_sched_allocate.c libpacemaker_la_SOURCES += pcmk_sched_bundle.c libpacemaker_la_SOURCES += pcmk_sched_clone.c libpacemaker_la_SOURCES += pcmk_sched_colocation.c libpacemaker_la_SOURCES += pcmk_sched_constraints.c libpacemaker_la_SOURCES += pcmk_sched_fencing.c libpacemaker_la_SOURCES += pcmk_sched_group.c libpacemaker_la_SOURCES += pcmk_sched_location.c libpacemaker_la_SOURCES += pcmk_sched_native.c libpacemaker_la_SOURCES += pcmk_sched_nodes.c -libpacemaker_la_SOURCES += pcmk_sched_notif.c libpacemaker_la_SOURCES += pcmk_sched_ordering.c libpacemaker_la_SOURCES += pcmk_sched_probes.c libpacemaker_la_SOURCES += pcmk_sched_promotable.c libpacemaker_la_SOURCES += pcmk_sched_remote.c libpacemaker_la_SOURCES += pcmk_sched_resource.c libpacemaker_la_SOURCES += pcmk_sched_tickets.c libpacemaker_la_SOURCES += pcmk_sched_utilization.c libpacemaker_la_SOURCES += pcmk_simulate.c libpacemaker_la_SOURCES += pcmk_status.c diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h index b1a77ccc10..3f3d35abb2 100644 --- a/lib/pacemaker/libpacemaker_private.h +++ b/lib/pacemaker/libpacemaker_private.h @@ -1,447 +1,424 @@ /* * Copyright 2021-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__LIBPACEMAKER_PRIVATE__H # define PCMK__LIBPACEMAKER_PRIVATE__H /* This header is for the sole use of libpacemaker, so that functions can be * declared with G_GNUC_INTERNAL for efficiency. 
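As the comment in libpacemaker_private.h notes, the header exists so that symbols shared only between this library's translation units can be declared with G_GNUC_INTERNAL and stay out of the exported symbol table. A small sketch of the pattern; the helper names are made up for illustration, and G_GNUC_INTERNAL comes from GLib (typically expanding to __attribute__((visibility("hidden"))) on GCC/Clang):

#include <glib.h>

/* In the library-private header: visible to this library's own files only */
G_GNUC_INTERNAL
int private_helper(int x);

/* In the public header: part of the library's exported API */
int public_entry_point(int x);

/* In one of the library's .c files */
G_GNUC_INTERNAL
int
private_helper(int x)
{
    return x * 2;   // internal detail, never exported from the shared object
}

int
public_entry_point(int x)
{
    // Calls the hidden helper without going through the dynamic symbol table
    return private_helper(x) + 1;
}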
*/ #include // pe_action_t, pe_node_t, pe_working_set_t // Actions (pcmk_sched_actions.c) G_GNUC_INTERNAL void pcmk__update_action_for_orderings(pe_action_t *action, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details); -G_GNUC_INTERNAL -pe_action_t *pcmk__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, - bool optional, bool runnable); - G_GNUC_INTERNAL pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node); G_GNUC_INTERNAL pe_action_t *pcmk__new_shutdown_action(pe_node_t *node); G_GNUC_INTERNAL bool pcmk__action_locks_rsc_to_node(const pe_action_t *action); G_GNUC_INTERNAL void pcmk__deduplicate_action_inputs(pe_action_t *action); G_GNUC_INTERNAL void pcmk__output_actions(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op); G_GNUC_INTERNAL void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set); // Producing transition graphs (pcmk_graph_producer.c) G_GNUC_INTERNAL bool pcmk__graph_has_loop(pe_action_t *init_action, pe_action_t *action, pe_action_wrapper_t *input); G_GNUC_INTERNAL void pcmk__add_action_to_graph(pe_action_t *action, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__create_graph(pe_working_set_t *data_set); // Fencing (pcmk_sched_fencing.c) G_GNUC_INTERNAL void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, enum pe_ordering order, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__fence_guest(pe_node_t *node); G_GNUC_INTERNAL bool pcmk__node_unfenced(pe_node_t *node); // Injected scheduler inputs (pcmk_sched_injections.c) void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib, pcmk_injections_t *injections); // Constraints of any type (pcmk_sched_constraints.c) G_GNUC_INTERNAL pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id); G_GNUC_INTERNAL xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__valid_resource_or_tag(pe_working_set_t *data_set, const char *id, pe_resource_t **rsc, pe_tag_t **tag); G_GNUC_INTERNAL bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr, bool convert_rsc, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__create_internal_constraints(pe_working_set_t *data_set); // Location constraints G_GNUC_INTERNAL void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc, int node_weight, const char *discover_mode, pe_node_t *foo_node, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__apply_locations(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__apply_location(pe__location_t *constraint, pe_resource_t *rsc); // Colocation constraints (pcmk_sched_colocation.c) enum pcmk__coloc_affects { pcmk__coloc_affects_nothing = 0, pcmk__coloc_affects_location, pcmk__coloc_affects_role, }; G_GNUC_INTERNAL enum pcmk__coloc_affects pcmk__colocation_affects(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, bool preview); G_GNUC_INTERNAL void pcmk__apply_coloc_to_weights(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint); G_GNUC_INTERNAL void pcmk__apply_coloc_to_priority(pe_resource_t *dependent, pe_resource_t 
*primary, pcmk__colocation_t *constraint); G_GNUC_INTERNAL void pcmk__apply_colocation(pcmk__colocation_t *colocation, pe_resource_t *rsc1, pe_resource_t *rsc2, uint32_t flags); G_GNUC_INTERNAL void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__new_colocation(const char *id, const char *node_attr, int score, pe_resource_t *dependent, pe_resource_t *primary, const char *dependent_role, const char *primary_role, bool influence, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__block_colocated_starts(pe_action_t *action, pe_working_set_t *data_set); /*! * \internal * \brief Check whether colocation's left-hand preferences should be considered * * \param[in] colocation Colocation constraint * \param[in] rsc Right-hand instance (normally this will be * colocation->primary, which NULL will be treated as, * but for clones or bundles with multiple instances * this can be a particular instance) * * \return true if colocation influence should be effective, otherwise false */ static inline bool pcmk__colocation_has_influence(const pcmk__colocation_t *colocation, const pe_resource_t *rsc) { if (rsc == NULL) { rsc = colocation->primary; } /* The left hand of a colocation influences the right hand's location * if the influence option is true, or the right hand is not yet active. */ return colocation->influence || (rsc->running_on == NULL); } // Ordering constraints (pcmk_sched_ordering.c) G_GNUC_INTERNAL void pcmk__new_ordering(pe_resource_t *lh_rsc, char *lh_task, pe_action_t *lh_action, pe_resource_t *rh_rsc, char *rh_task, pe_action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__disable_invalid_orderings(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__apply_orderings(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__order_after_each(pe_action_t *after, GList *list); /*! 
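pcmk__colocation_has_influence() above encodes the rule that a dependent resource's preferences may sway the primary's placement only when the constraint's influence option is set or the primary (or the given instance of it) is not active yet. A toy standalone version of the same predicate; struct resource and struct colocation here are simplified stand-ins, not the real pe_resource_t and pcmk__colocation_t:

#include <stdbool.h>
#include <stddef.h>

struct resource {
    const char *id;
    void *running_on;   // NULL when the resource is not active anywhere
};

struct colocation {
    struct resource *primary;
    bool influence;     // from the constraint's "influence" option
};

/* Should the dependent's preferences be allowed to move the primary? */
bool
has_influence(const struct colocation *colocation, const struct resource *rsc)
{
    if (rsc == NULL) {
        rsc = colocation->primary;
    }
    /* Influence applies if explicitly requested, or if the primary is not
     * active yet and so can still be placed freely.
     */
    return colocation->influence || (rsc->running_on == NULL);
}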
* \internal * \brief Create a new ordering between two resource actions * * \param[in] lh_rsc Resource for 'first' action * \param[in] rh_rsc Resource for 'then' action * \param[in] lh_task Action key for 'first' action * \param[in] rh_task Action key for 'then' action * \param[in] flags Bitmask of enum pe_ordering flags * \param[in] data_set Cluster working set to add ordering to */ #define pcmk__order_resource_actions(lh_rsc, lh_task, rh_rsc, rh_task, \ flags, data_set) \ pcmk__new_ordering((lh_rsc), pcmk__op_key((lh_rsc)->id, (lh_task), 0), \ NULL, \ (rh_rsc), pcmk__op_key((rh_rsc)->id, (rh_task), 0), \ NULL, (flags), (data_set)) #define pcmk__order_starts(rsc1, rsc2, type, data_set) \ pcmk__order_resource_actions((rsc1), CRMD_ACTION_START, \ (rsc2), CRMD_ACTION_START, (type), (data_set)) #define pcmk__order_stops(rsc1, rsc2, type, data_set) \ pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP, \ (rsc2), CRMD_ACTION_STOP, (type), (data_set)) // Ticket constraints (pcmk_sched_tickets.c) G_GNUC_INTERNAL void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set); // Promotable clone resources (pcmk_sched_promotable.c) G_GNUC_INTERNAL void pcmk__require_promotion_tickets(pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__set_instance_roles(pe_resource_t *rsc); // Pacemaker Remote nodes (pcmk_sched_remote.c) G_GNUC_INTERNAL bool pcmk__is_failed_remote_node(pe_node_t *node); G_GNUC_INTERNAL void pcmk__order_remote_connection_actions(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node); G_GNUC_INTERNAL pe_node_t *pcmk__connection_host_for_action(pe_action_t *action); G_GNUC_INTERNAL void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params); G_GNUC_INTERNAL void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action); // Groups (pcmk_sched_group.c) G_GNUC_INTERNAL GList *pcmk__group_colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc, GList *colocated_rscs); // Bundles (pcmk_sched_bundle.c) G_GNUC_INTERNAL void pcmk__output_bundle_actions(pe_resource_t *rsc); // Injections (pcmk_injections.c) G_GNUC_INTERNAL xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid); G_GNUC_INTERNAL xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up); G_GNUC_INTERNAL xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *lrm_name, const char *rclass, const char *rtype, const char *rprovider); G_GNUC_INTERNAL void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *task, guint interval_ms, int rc); G_GNUC_INTERNAL xmlNode *pcmk__inject_action_result(xmlNode *cib_resource, lrmd_event_data_t *op, int target_rc); // Nodes (pcmk_sched_nodes.c) G_GNUC_INTERNAL bool pcmk__node_available(const pe_node_t *node, bool consider_score, bool consider_guest); G_GNUC_INTERNAL bool pcmk__any_node_available(GHashTable *nodes); G_GNUC_INTERNAL GHashTable *pcmk__copy_node_table(GHashTable *nodes); G_GNUC_INTERNAL GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__apply_node_health(pe_working_set_t *data_set); G_GNUC_INTERNAL pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node); -// Clone notifictions (pcmk_sched_notif.c) - -G_GNUC_INTERNAL -void pcmk__create_notifications(pe_resource_t *rsc, notify_data_t *n_data); - -G_GNUC_INTERNAL -notify_data_t 
*pcmk__clone_notif_pseudo_ops(pe_resource_t *rsc, - const char *task, - pe_action_t *action, - pe_action_t *complete); - -G_GNUC_INTERNAL -void pcmk__free_notification_data(notify_data_t *n_data); - -G_GNUC_INTERNAL -void pcmk__order_notifs_after_fencing(pe_action_t *action, pe_resource_t *rsc, - pe_action_t *stonith_op); - - // Functions applying to more than one variant (pcmk_sched_resource.c) G_GNUC_INTERNAL void pcmk__set_allocation_methods(pe_working_set_t *data_set); G_GNUC_INTERNAL bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_entry, bool active_on_node); G_GNUC_INTERNAL GList *pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set); G_GNUC_INTERNAL GList *pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc, GList *colocated_rscs); G_GNUC_INTERNAL void pcmk__output_resource_actions(pe_resource_t *rsc); G_GNUC_INTERNAL bool pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force); G_GNUC_INTERNAL bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force); G_GNUC_INTERNAL void pcmk__unassign_resource(pe_resource_t *rsc); G_GNUC_INTERNAL bool pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node, pe_resource_t **failed); G_GNUC_INTERNAL void pcmk__sort_resources(pe_working_set_t *data_set); G_GNUC_INTERNAL gint pcmk__cmp_instance(gconstpointer a, gconstpointer b); G_GNUC_INTERNAL gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b); // Functions related to probes (pcmk_sched_probes.c) G_GNUC_INTERNAL void pcmk__order_probes(pe_working_set_t *data_set); G_GNUC_INTERNAL void pcmk__schedule_probes(pe_working_set_t *data_set); // Functions related to node utilization (pcmk_sched_utilization.c) G_GNUC_INTERNAL int pcmk__compare_node_capacities(const pe_node_t *node1, const pe_node_t *node2); G_GNUC_INTERNAL void pcmk__consume_node_capacity(GHashTable *current_utilization, pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__release_node_capacity(GHashTable *current_utilization, pe_resource_t *rsc); G_GNUC_INTERNAL void pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer); G_GNUC_INTERNAL void pcmk__create_utilization_constraints(pe_resource_t *rsc, GList *allowed_nodes); G_GNUC_INTERNAL void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set); #endif // PCMK__LIBPACEMAKER_PRIVATE__H diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c index dacd8c5f0f..6ec5ded2e8 100644 --- a/lib/pacemaker/pcmk_sched_actions.c +++ b/lib/pacemaker/pcmk_sched_actions.c @@ -1,1763 +1,1735 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include "libpacemaker_private.h" extern gboolean DeleteRsc(pe_resource_t *rsc, pe_node_t *node, gboolean optional, pe_working_set_t *data_set); /*! 
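These pcmk__ declarations are removed because the clone notification code now lives in libpe_status as pe__create_notifications(), pe__clone_notif_pseudo_ops(), pe__free_notification_data() and pe__order_notifs_after_fencing() (see the additions to include/crm/pengine/internal.h above). A hedged sketch of how a caller would drive the renamed entry points, based only on the declared signatures; the wrapper function, the start/started variables and the exact call site are assumptions for illustration, not taken from this diff:

#include <crm/crm.h>                // RSC_START
#include <crm/pengine/internal.h>   // pe__clone_notif_pseudo_ops() etc.

/* Wire up pre/post notifications around a clone's start phase (sketch). */
static void
add_start_notifications(pe_resource_t *clone, pe_action_t *start,
                        pe_action_t *started)
{
    if (pcmk_is_set(clone->flags, pe_rsc_notify)) {
        // Create the "pre", "post" and "confirmed" notification pseudo-ops
        notify_data_t *n_data = pe__clone_notif_pseudo_ops(clone, RSC_START,
                                                           start, started);

        // Fill in the notification environment and per-instance notify actions
        pe__create_notifications(clone, n_data);

        // The notify_data_t is only needed while the graph is being built
        pe__free_notification_data(n_data);
    }
}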
* \internal * \brief Get the action flags relevant to ordering constraints * * \param[in] action Action to check * \param[in] node Node that *other* action in the ordering is on * (used only for clone resource actions) * * \return Action flags that should be used for orderings */ static enum pe_action_flags action_flags_for_ordering(pe_action_t *action, pe_node_t *node) { bool runnable = false; enum pe_action_flags flags; // For non-resource actions, return the action flags if (action->rsc == NULL) { return action->flags; } /* For non-clone resources, or a clone action not assigned to a node, * return the flags as determined by the resource method without a node * specified. */ flags = action->rsc->cmds->action_flags(action, NULL); if ((node == NULL) || !pe_rsc_is_clone(action->rsc)) { return flags; } /* Otherwise (i.e., for clone resource actions on a specific node), first * remember whether the non-node-specific action is runnable. */ runnable = pcmk_is_set(flags, pe_action_runnable); // Then recheck the resource method with the node flags = action->rsc->cmds->action_flags(action, node); /* For clones in ordering constraints, the node-specific "runnable" doesn't * matter, just the non-node-specific setting (i.e., is the action runnable * anywhere). * * This applies only to runnable, and only for ordering constraints. This * function shouldn't be used for other types of constraints without * changes. Not very satisfying, but it's logical and appears to work well. */ if (runnable && !pcmk_is_set(flags, pe_action_runnable)) { pe__set_raw_action_flags(flags, action->rsc->id, pe_action_runnable); } return flags; } /*! * \internal * \brief Get action UUID that should be used with a resource ordering * * When an action is ordered relative to an action for a collective resource * (clone, group, or bundle), it actually needs to be ordered after all * instances of the collective have completed the relevant action (for example, * given "start CLONE then start RSC", RSC must wait until all instances of * CLONE have started). Given the UUID and resource of the first action in an * ordering, this returns the UUID of the action that should actually be used * for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0"). * * \param[in] first_uuid UUID of first action in ordering * \param[in] first_rsc Resource of first action in ordering * * \return Newly allocated copy of UUID to use with ordering * \note It is the caller's responsibility to free the return value. 
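As the doc comment above describes, ordering against a collective resource's "start" really means ordering against the point where every instance has started, so the key is remapped to the corresponding "*ed" pseudo-action. A small illustration of the key shapes involved, using the standard <resource>_<task>_<interval> operation-key format; the mapping table below is a simplification of action_uuid_for_ordering() (it ignores the notification "confirmed-post" variant), not a drop-in replacement:

#include <stdio.h>
#include <string.h>

/* Map a collective resource's action to the "completed" form used for
 * ordering (start -> started, stop -> stopped, promote -> promoted, ...).
 */
static const char *
completed_task(const char *task)
{
    if (strcmp(task, "start") == 0)   return "started";
    if (strcmp(task, "stop") == 0)    return "stopped";
    if (strcmp(task, "promote") == 0) return "promoted";
    if (strcmp(task, "demote") == 0)  return "demoted";
    return task;    // monitors etc. are left alone
}

int main(void)
{
    char key[64];

    // "start CLONE then start RSC" is really ordered on CLONE_started_0
    snprintf(key, sizeof(key), "%s_%s_%u", "CLONE", completed_task("start"), 0U);
    printf("%s\n", key);    // prints CLONE_started_0
    return 0;
}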
*/ static char * action_uuid_for_ordering(const char *first_uuid, pe_resource_t *first_rsc) { guint interval_ms = 0; char *uuid = NULL; char *rid = NULL; char *first_task_str = NULL; enum action_tasks first_task = no_action; enum action_tasks remapped_task = no_action; // Only non-notify actions for collective resources need remapping if ((strstr(first_uuid, "notify") != NULL) || (first_rsc->variant < pe_group)) { goto done; } // Only non-recurring actions need remapping CRM_ASSERT(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms)); if (interval_ms > 0) { goto done; } first_task = text2task(first_task_str); switch (first_task) { case stop_rsc: case start_rsc: case action_notify: case action_promote: case action_demote: remapped_task = first_task + 1; break; case stopped_rsc: case started_rsc: case action_notified: case action_promoted: case action_demoted: remapped_task = first_task; break; case monitor_rsc: case shutdown_crm: case stonith_node: break; default: crm_err("Unknown action '%s' in ordering", first_task_str); break; } if (remapped_task != no_action) { /* If a (clone) resource has notifications enabled, we want to order * relative to when all notifications have been sent for the remapped * task. Only outermost resources or those in bundles have * notifications. */ if (pcmk_is_set(first_rsc->flags, pe_rsc_notify) && ((first_rsc->parent == NULL) || (pe_rsc_is_clone(first_rsc) && (first_rsc->parent->variant == pe_container)))) { uuid = pcmk__notify_key(rid, "confirmed-post", task2text(remapped_task)); } else { uuid = pcmk__op_key(rid, task2text(remapped_task), 0); } pe_rsc_trace(first_rsc, "Remapped action UUID %s to %s for ordering purposes", first_uuid, uuid); } done: if (uuid == NULL) { uuid = strdup(first_uuid); CRM_ASSERT(uuid != NULL); } free(first_task_str); free(rid); return uuid; } /*! * \internal * \brief Get actual action that should be used with an ordering * * When an action is ordered relative to an action for a collective resource * (clone, group, or bundle), it actually needs to be ordered after all * instances of the collective have completed the relevant action (for example, * given "start CLONE then start RSC", RSC must wait until all instances of * CLONE have started). Given the first action in an ordering, this returns the * the action that should actually be used for ordering (for example, the * started action instead of the start action). * * \param[in] action First action in an ordering * * \return Actual action that should be used for the ordering */ static pe_action_t * action_for_ordering(pe_action_t *action) { pe_action_t *result = action; pe_resource_t *rsc = action->rsc; if ((rsc != NULL) && (rsc->variant >= pe_group) && (action->uuid != NULL)) { char *uuid = action_uuid_for_ordering(action->uuid, rsc); result = find_first_action(rsc->actions, uuid, NULL, NULL); if (result == NULL) { crm_warn("Not remapping %s to %s because %s does not have " "remapped action", action->uuid, uuid, rsc->id); result = action; } free(uuid); } return result; } /*! 
* \internal * \brief Update flags for ordering's actions appropriately for ordering's flags * * \param[in] first First action in an ordering * \param[in] then Then action in an ordering * \param[in] first_flags Action flags for \p first for ordering purposes * \param[in] then_flags Action flags for \p then for ordering purposes * \param[in] order Action wrapper for \p first in ordering * \param[in] data_set Cluster working set * * \return Mask of pe_graph_updated_first and/or pe_graph_updated_then */ static enum pe_graph_flags update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then, enum pe_action_flags first_flags, enum pe_action_flags then_flags, pe_action_wrapper_t *order, pe_working_set_t *data_set) { enum pe_graph_flags changed = pe_graph_none; /* The node will only be used for clones. If interleaved, node will be NULL, * otherwise the ordering scope will be limited to the node. Normally, the * whole 'then' clone should restart if 'first' is restarted, so then->node * is needed. */ pe_node_t *node = then->node; if (pcmk_is_set(order->type, pe_order_implies_then_on_node)) { /* For unfencing, only instances of 'then' on the same node as 'first' * (the unfencing operation) should restart, so reset node to * first->node, at which point this case is handled like a normal * pe_order_implies_then. */ pe__clear_order_flags(order->type, pe_order_implies_then_on_node); pe__set_order_flags(order->type, pe_order_implies_then); node = first->node; pe_rsc_trace(then->rsc, "%s then %s: mapped pe_order_implies_then_on_node to " "pe_order_implies_then on %s", first->uuid, then->uuid, node->details->uname); } if (pcmk_is_set(order->type, pe_order_implies_then)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional, pe_action_optional, pe_order_implies_then, data_set); } else if (!pcmk_is_set(first_flags, pe_action_optional) && pcmk_is_set(then->flags, pe_action_optional)) { pe__clear_action_flags(then, pe_action_optional); pe__set_graph_flags(changed, first, pe_graph_updated_then); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_then", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_restart) && (then->rsc != NULL)) { enum pe_action_flags restart = pe_action_optional|pe_action_runnable; changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, restart, pe_order_restart, data_set); pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_restart", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_implies_first)) { if (first->rsc != NULL) { changed |= first->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_optional, pe_order_implies_first, data_set); } else if (!pcmk_is_set(first_flags, pe_action_optional) && pcmk_is_set(first->flags, pe_action_runnable)) { pe__clear_action_flags(first, pe_action_runnable); pe__set_graph_flags(changed, first, pe_graph_updated_first); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_first", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_promoted_implies_first)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional, pe_action_optional, pe_order_promoted_implies_first, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_promoted_implies_first", first->uuid, then->uuid, (changed? 
"changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_one_or_more)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_one_or_more, data_set); } else if (pcmk_is_set(first_flags, pe_action_runnable)) { // We have another runnable instance of "first" then->runnable_before++; /* Mark "then" as runnable if it requires a certain number of * "before" instances to be runnable, and they now are. */ if ((then->runnable_before >= then->required_runnable_before) && !pcmk_is_set(then->flags, pe_action_runnable)) { pe__set_action_flags(then, pe_action_runnable); pe__set_graph_flags(changed, first, pe_graph_updated_then); } } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_one_or_more", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_probe) && (then->rsc != NULL)) { if (!pcmk_is_set(first_flags, pe_action_runnable) && (first->rsc->running_on != NULL)) { pe_rsc_trace(then->rsc, "%s then %s: ignoring because first is stopping", first->uuid, then->uuid); order->type = pe_order_none; } else { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_runnable_left, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_probe", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_runnable_left)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_runnable_left, data_set); } else if (!pcmk_is_set(first_flags, pe_action_runnable) && pcmk_is_set(then->flags, pe_action_runnable)) { pe__clear_action_flags(then, pe_action_runnable); pe__set_graph_flags(changed, first, pe_graph_updated_then); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_runnable_left", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_implies_first_migratable)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_optional, pe_order_implies_first_migratable, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after " "pe_order_implies_first_migratable", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_pseudo_left)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_optional, pe_order_pseudo_left, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_pseudo_left", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_optional)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_optional, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_optional", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } if (pcmk_is_set(order->type, pe_order_asymmetrical)) { if (then->rsc != NULL) { changed |= then->rsc->cmds->update_actions(first, then, node, first_flags, pe_action_runnable, pe_order_asymmetrical, data_set); } pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_asymmetrical", first->uuid, then->uuid, (changed? 
"changed" : "unchanged")); } if (pcmk_is_set(first->flags, pe_action_runnable) && pcmk_is_set(order->type, pe_order_implies_then_printed) && !pcmk_is_set(first_flags, pe_action_optional)) { pe_rsc_trace(then->rsc, "%s will be in graph because %s is required", then->uuid, first->uuid); pe__set_action_flags(then, pe_action_print_always); // Don't bother marking 'then' as changed just for this } if (pcmk_is_set(order->type, pe_order_implies_first_printed) && !pcmk_is_set(then_flags, pe_action_optional)) { pe_rsc_trace(then->rsc, "%s will be in graph because %s is required", first->uuid, then->uuid); pe__set_action_flags(first, pe_action_print_always); // Don't bother marking 'first' as changed just for this } if (pcmk_any_flags_set(order->type, pe_order_implies_then |pe_order_implies_first |pe_order_restart) && (first->rsc != NULL) && !pcmk_is_set(first->rsc->flags, pe_rsc_managed) && pcmk_is_set(first->rsc->flags, pe_rsc_block) && !pcmk_is_set(first->flags, pe_action_runnable) && pcmk__str_eq(first->task, RSC_STOP, pcmk__str_casei)) { if (pcmk_is_set(then->flags, pe_action_runnable)) { pe__clear_action_flags(then, pe_action_runnable); pe__set_graph_flags(changed, first, pe_graph_updated_then); } pe_rsc_trace(then->rsc, "%s then %s: %s after checking whether first " "is blocked, unmanaged, unrunnable stop", first->uuid, then->uuid, (changed? "changed" : "unchanged")); } return changed; } // Convenience macros for logging action properties #define action_type_str(flags) \ (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action") #define action_optional_str(flags) \ (pcmk_is_set((flags), pe_action_optional)? "optional" : "required") #define action_runnable_str(flags) \ (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable") #define action_node_str(a) \ (((a)->node == NULL)? "no node" : (a)->node->details->uname) /*! * \internal * \brief Update an action's flags for all orderings where it is "then" * * \param[in] then Action to update * \param[in] data_set Cluster working set */ void pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set) { GList *lpc = NULL; enum pe_graph_flags changed = pe_graph_none; int last_flags = then->flags; pe_rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s", action_type_str(then->flags), then->uuid, action_optional_str(then->flags), action_runnable_str(then->flags), action_node_str(then)); if (pcmk_is_set(then->flags, pe_action_requires_any)) { /* Initialize current known "runnable before" actions. As * update_action_for_ordering_flags() is called for each of then's * before actions, this number will increment as runnable 'first' * actions are encountered. */ then->runnable_before = 0; if (then->required_runnable_before == 0) { /* @COMPAT This ordering constraint uses the deprecated * "require-all=false" attribute. Treat it like "clone-min=1". */ then->required_runnable_before = 1; } /* The pe_order_one_or_more clause of update_action_for_ordering_flags() * (called below) will reset runnable if appropriate. 
*/ pe__clear_action_flags(then, pe_action_runnable); } for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data; pe_action_t *first = other->action; pe_node_t *then_node = then->node; pe_node_t *first_node = first->node; if ((first->rsc != NULL) && (first->rsc->variant == pe_group) && pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) { first_node = first->rsc->fns->location(first->rsc, NULL, FALSE); if (first_node != NULL) { pe_rsc_trace(first->rsc, "Found node %s for 'first' %s", first_node->details->uname, first->uuid); } } if ((then->rsc != NULL) && (then->rsc->variant == pe_group) && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) { then_node = then->rsc->fns->location(then->rsc, NULL, FALSE); if (then_node != NULL) { pe_rsc_trace(then->rsc, "Found node %s for 'then' %s", then_node->details->uname, then->uuid); } } // Disable constraint if it only applies when on same node, but isn't if (pcmk_is_set(other->type, pe_order_same_node) && (first_node != NULL) && (then_node != NULL) && (first_node->details != then_node->details)) { pe_rsc_trace(then->rsc, "Disabled ordering %s on %s then %s on %s: not same node", other->action->uuid, first_node->details->uname, then->uuid, then_node->details->uname); other->type = pe_order_none; continue; } pe__clear_graph_flags(changed, then, pe_graph_updated_first); if ((first->rsc != NULL) && pcmk_is_set(other->type, pe_order_then_cancels_first) && !pcmk_is_set(then->flags, pe_action_optional)) { /* 'then' is required, so we must abandon 'first' * (e.g. a required stop cancels any agent reload). */ pe__set_action_flags(other->action, pe_action_optional); if (!strcmp(first->task, CRMD_ACTION_RELOAD_AGENT)) { pe__clear_resource_flags(first->rsc, pe_rsc_reload); } } if ((first->rsc != NULL) && (then->rsc != NULL) && (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) { first = action_for_ordering(first); } if (first != other->action) { pe_rsc_trace(then->rsc, "Ordering %s after %s instead of %s", then->uuid, first->uuid, other->action->uuid); } pe_rsc_trace(then->rsc, "%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s", first->uuid, first->flags, then->uuid, then->flags, other->type, action_node_str(first)); if (first == other->action) { /* 'first' was not remapped (e.g. from 'start' to 'running'), which * could mean it is a non-resource action, a primitive resource * action, or already expanded. 
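One rule applied in the loop above: when 'then' turns out to be required and the ordering carries pe_order_then_cancels_first, the pending 'first' is demoted to optional, because (for example) a required stop makes a queued agent reload pointless, and the resource's pending-reload flag is cleared. Sketched below with simplified types; struct simple_action and its fields are illustrative, not Pacemaker's:

#include <stdbool.h>
#include <stdio.h>

struct simple_action {
    const char *task;
    bool optional;
};

/* If "then" is required, a "then cancels first" ordering makes "first"
 * unnecessary (a required stop supersedes a queued reload).
 */
static void
apply_then_cancels_first(struct simple_action *first,
                         const struct simple_action *then)
{
    if (!then->optional && !first->optional) {
        first->optional = true;
        printf("dropping %s because %s is required\n",
               first->task, then->task);
    }
}

int main(void)
{
    struct simple_action reload = { "reload-agent", false };
    struct simple_action stop = { "stop", false };

    apply_then_cancels_first(&reload, &stop);   // reload becomes optional
    return 0;
}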
*/ enum pe_action_flags first_flags, then_flags; first_flags = action_flags_for_ordering(first, then_node); then_flags = action_flags_for_ordering(then, first_node); changed |= update_action_for_ordering_flags(first, then, first_flags, then_flags, other, data_set); /* 'first' was for a complex resource (clone, group, etc), * create a new dependency if necessary */ } else if (order_actions(first, then, other->type)) { /* This was the first time 'first' and 'then' were associated, * start again to get the new actions_before list */ pe__set_graph_flags(changed, then, pe_graph_updated_then|pe_graph_disable); } if (pcmk_is_set(changed, pe_graph_disable)) { pe_rsc_trace(then->rsc, "Disabled ordering %s then %s in favor of %s then %s", other->action->uuid, then->uuid, first->uuid, then->uuid); pe__clear_graph_flags(changed, then, pe_graph_disable); other->type = pe_order_none; } if (pcmk_is_set(changed, pe_graph_updated_first)) { crm_trace("Re-processing %s and its 'after' actions " "because it changed", first->uuid); for (GList *lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data; pcmk__update_action_for_orderings(other->action, data_set); } pcmk__update_action_for_orderings(first, data_set); } } if (pcmk_is_set(then->flags, pe_action_requires_any)) { if (last_flags == then->flags) { pe__clear_graph_flags(changed, then, pe_graph_updated_then); } else { pe__set_graph_flags(changed, then, pe_graph_updated_then); } } if (pcmk_is_set(changed, pe_graph_updated_then)) { crm_trace("Re-processing %s and its 'after' actions because it changed", then->uuid); if (pcmk_is_set(last_flags, pe_action_runnable) && !pcmk_is_set(then->flags, pe_action_runnable)) { pcmk__block_colocated_starts(then, data_set); } pcmk__update_action_for_orderings(then, data_set); for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data; pcmk__update_action_for_orderings(other->action, data_set); } } } /*! * \internal * \brief Trace-log an action (optionally with its dependent actions) * * \param[in] pre_text If not NULL, prefix the log with this plus ": " * \param[in] action Action to log * \param[in] details If true, recursively log dependent actions */ void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details) { const char *node_uname = NULL; const char *node_uuid = NULL; const char *desc = NULL; CRM_CHECK(action != NULL, return); if (!pcmk_is_set(action->flags, pe_action_pseudo)) { if (action->node != NULL) { node_uname = action->node->details->uname; node_uuid = action->node->details->id; } else { node_uname = ""; } } switch (text2task(action->task)) { case stonith_node: case shutdown_crm: if (pcmk_is_set(action->flags, pe_action_pseudo)) { desc = "Pseudo "; } else if (pcmk_is_set(action->flags, pe_action_optional)) { desc = "Optional "; } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { desc = "!!Non-Startable!! "; } else if (pcmk_is_set(action->flags, pe_action_processed)) { desc = ""; } else { desc = "(Provisional) "; } crm_trace("%s%s%sAction %d: %s%s%s%s%s%s", ((pre_text == NULL)? "" : pre_text), ((pre_text == NULL)? "" : ": "), desc, action->id, action->uuid, (node_uname? "\ton " : ""), (node_uname? node_uname : ""), (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""), (node_uuid? 
")" : "")); break; default: if (pcmk_is_set(action->flags, pe_action_optional)) { desc = "Optional "; } else if (pcmk_is_set(action->flags, pe_action_pseudo)) { desc = "Pseudo "; } else if (!pcmk_is_set(action->flags, pe_action_runnable)) { desc = "!!Non-Startable!! "; } else if (pcmk_is_set(action->flags, pe_action_processed)) { desc = ""; } else { desc = "(Provisional) "; } crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s", ((pre_text == NULL)? "" : pre_text), ((pre_text == NULL)? "" : ": "), desc, action->id, action->uuid, (action->rsc? action->rsc->id : ""), (node_uname? "\ton " : ""), (node_uname? node_uname : ""), (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""), (node_uuid? ")" : "")); break; } if (details) { GList *iter = NULL; crm_trace("\t\t====== Preceding Actions"); for (iter = action->actions_before; iter != NULL; iter = iter->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) iter->data; pcmk__log_action("\t\t", other->action, false); } crm_trace("\t\t====== Subsequent Actions"); for (iter = action->actions_after; iter != NULL; iter = iter->next) { pe_action_wrapper_t *other = (pe_action_wrapper_t *) iter->data; pcmk__log_action("\t\t", other->action, false); } crm_trace("\t\t====== End"); } else { crm_trace("\t\t(before=%d, after=%d)", g_list_length(action->actions_before), g_list_length(action->actions_after)); } } -/*! - * \internal - * \brief Create a new pseudo-action for a resource - * - * \param[in] rsc Resource to create action for - * \param[in] task Action name - * \param[in] optional Whether action should be considered optional - * \param[in] runnable Whethe action should be considered runnable - * - * \return New action object corresponding to arguments - */ -pe_action_t * -pcmk__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, - bool optional, bool runnable) -{ - pe_action_t *action = NULL; - - CRM_ASSERT((rsc != NULL) && (task != NULL)); - - action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL, - optional, TRUE, rsc->cluster); - pe__set_action_flags(action, pe_action_pseudo); - if (runnable) { - pe__set_action_flags(action, pe_action_runnable); - } - return action; -} - /*! * \internal * \brief Create an executor cancel action * * \param[in] rsc Resource of action to cancel * \param[in] task Name of action to cancel * \param[in] interval_ms Interval of action to cancel * \param[in] node Node of action to cancel * \param[in] data_set Working set of cluster * * \return Created op */ pe_action_t * pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node) { pe_action_t *cancel_op = NULL; char *key = NULL; char *interval_ms_s = NULL; CRM_ASSERT((rsc != NULL) && (task != NULL) && (node != NULL)); // @TODO dangerous if possible to schedule another action with this key key = pcmk__op_key(rsc->id, task, interval_ms); cancel_op = custom_action(rsc, key, RSC_CANCEL, node, FALSE, TRUE, rsc->cluster); pcmk__str_update(&cancel_op->task, RSC_CANCEL); pcmk__str_update(&cancel_op->cancel_task, task); interval_ms_s = crm_strdup_printf("%u", interval_ms); add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, task); add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL_MS, interval_ms_s); free(interval_ms_s); return cancel_op; } /*! 
* \internal * \brief Create a new shutdown action for a node * * \param[in] node Node being shut down * * \return Newly created shutdown action for \p node */ pe_action_t * pcmk__new_shutdown_action(pe_node_t *node) { char *shutdown_id = NULL; pe_action_t *shutdown_op = NULL; CRM_ASSERT(node != NULL); shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname); shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN, node, FALSE, TRUE, node->details->data_set); pcmk__order_stops_before_shutdown(node, shutdown_op, node->details->data_set); add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); return shutdown_op; } /*! * \internal * \brief Calculate and add an operation digest to XML * * Calculate an operation digest, which enables us to later determine when a * restart is needed due to the resource's parameters being changed, and add it * to given XML. * * \param[in] op Operation result from executor * \param[in] update XML to add digest to */ static void add_op_digest_to_xml(lrmd_event_data_t *op, xmlNode *update) { char *digest = NULL; xmlNode *args_xml = NULL; if (op->params == NULL) { return; } args_xml = create_xml_node(NULL, XML_TAG_PARAMS); g_hash_table_foreach(op->params, hash2field, args_xml); pcmk__filter_op_for_digest(args_xml); digest = calculate_operation_digest(args_xml, NULL); crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest); free_xml(args_xml); free(digest); } #define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" /*! * \internal * \brief Create XML for resource operation history update * * \param[in,out] parent Parent XML node to add to * \param[in,out] op Operation event data * \param[in] caller_version DC feature set * \param[in] target_rc Expected result of operation * \param[in] node Name of node on which operation was performed * \param[in] origin Arbitrary description of update source * * \return Newly created XML node for history update */ xmlNode * pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op, const char *caller_version, int target_rc, const char *node, const char *origin) { char *key = NULL; char *magic = NULL; char *op_id = NULL; char *op_id_additional = NULL; char *local_user_data = NULL; const char *exit_reason = NULL; xmlNode *xml_op = NULL; const char *task = NULL; CRM_CHECK(op != NULL, return NULL); crm_trace("Creating history XML for %s-interval %s action for %s on %s " "(DC version: %s, origin: %s)", pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id, ((node == NULL)? "no node" : node), caller_version, origin); task = op->op_type; /* Record a successful agent reload as a start, and a failed one as a * monitor, to make life easier for the scheduler when determining the * current state. * * @COMPAT We should check "reload" here only if the operation was for a * pre-OCF-1.1 resource agent, but we don't know that here, and we should * only ever get results for actions scheduled by us, so we can reasonably * assume any "reload" is actually a pre-1.1 agent reload. 
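The digest added above exists so a later run can tell whether the recorded parameters still match the current configuration. A toy, self-contained illustration of the idea follows (sort parameters, serialize, hash); the real code digests the XML parameter block via calculate_operation_digest(), so this only shows why a changed value produces a different digest.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Toy stand-in for an operation-parameter digest.
struct demo_param { const char *name; const char *value; };

static int demo_cmp_param(const void *a, const void *b)
{
    const struct demo_param *pa = a;
    const struct demo_param *pb = b;

    return strcmp(pa->name, pb->name);
}

static uint64_t demo_params_digest(struct demo_param *params, size_t n)
{
    uint64_t hash = 1469598103934665603ULL; // FNV-1a offset basis

    qsort(params, n, sizeof(*params), demo_cmp_param);
    for (size_t i = 0; i < n; i++) {
        char buf[256];

        snprintf(buf, sizeof(buf), "%s=%s;", params[i].name, params[i].value);
        for (const char *p = buf; *p != '\0'; p++) {
            hash ^= (unsigned char) *p;
            hash *= 1099511628211ULL; // FNV-1a prime
        }
    }
    return hash;
}

int main(void)
{
    struct demo_param before[] = { { "ip", "192.168.1.10" }, { "nic", "eth0" } };
    struct demo_param after[]  = { { "ip", "192.168.1.11" }, { "nic", "eth0" } };

    printf("digest before: %llx\n", (unsigned long long) demo_params_digest(before, 2));
    printf("digest after:  %llx\n", (unsigned long long) demo_params_digest(after, 2));
    return 0;
}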
*/ if (pcmk__str_any_of(task, CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT, NULL)) { if (op->op_status == PCMK_EXEC_DONE) { task = CRMD_ACTION_START; } else { task = CRMD_ACTION_STATUS; } } key = pcmk__op_key(op->rsc_id, task, op->interval_ms); if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_none)) { const char *n_type = crm_meta_value(op->params, "notify_type"); const char *n_task = crm_meta_value(op->params, "notify_operation"); CRM_LOG_ASSERT(n_type != NULL); CRM_LOG_ASSERT(n_task != NULL); op_id = pcmk__notify_key(op->rsc_id, n_type, n_task); if (op->op_status != PCMK_EXEC_PENDING) { /* Ignore notify errors. * * @TODO It might be better to keep the correct result here, and * ignore it in process_graph_event(). */ lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); } /* Migration history is preserved separately, which usually matters for * multiple nodes and is important for future cluster transitions. */ } else if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) { op_id = strdup(key); } else if (did_rsc_op_fail(op, target_rc)) { op_id = pcmk__op_key(op->rsc_id, "last_failure", 0); if (op->interval_ms == 0) { // Ensure 'last' gets updated, in case record-pending is true op_id_additional = pcmk__op_key(op->rsc_id, "last", 0); } exit_reason = op->exit_reason; } else if (op->interval_ms > 0) { op_id = strdup(key); } else { op_id = pcmk__op_key(op->rsc_id, "last", 0); } again: xml_op = pcmk__xe_match(parent, XML_LRM_TAG_RSC_OP, XML_ATTR_ID, op_id); if (xml_op == NULL) { xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP); } if (op->user_data == NULL) { crm_debug("Generating fake transition key for: " PCMK__OP_FMT " %d from %s", op->rsc_id, op->op_type, op->interval_ms, op->call_id, origin); local_user_data = pcmk__transition_key(-1, op->call_id, target_rc, FAKE_TE_ID); op->user_data = local_user_data; } if (magic == NULL) { magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc, (const char *) op->user_data); } crm_xml_add(xml_op, XML_ATTR_ID, op_id); crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key); crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task); crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin); crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version); crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data); crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic); crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason == NULL ? 
"" : exit_reason); crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */ crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id); crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc); crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status); crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms); if (compare_version("2.1", caller_version) <= 0) { if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) { crm_trace("Timing data (" PCMK__OP_FMT "): last=%u change=%u exec=%u queue=%u", op->rsc_id, op->op_type, op->interval_ms, op->t_run, op->t_rcchange, op->exec_time, op->queue_time); if ((op->interval_ms != 0) && (op->t_rcchange != 0)) { // Recurring ops may have changed rc after initial run crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE, (long long) op->t_rcchange); } else { crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE, (long long) op->t_run); } crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time); crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time); } } if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) { /* * Record migrate_source and migrate_target always for migrate ops. */ const char *name = XML_LRM_ATTR_MIGRATE_SOURCE; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); name = XML_LRM_ATTR_MIGRATE_TARGET; crm_xml_add(xml_op, name, crm_meta_value(op->params, name)); } add_op_digest_to_xml(op, xml_op); if (op_id_additional) { free(op_id); op_id = op_id_additional; op_id_additional = NULL; goto again; } if (local_user_data) { free(local_user_data); op->user_data = NULL; } free(magic); free(op_id); free(key); return xml_op; } /*! * \internal * \brief Check whether an action shutdown-locks a resource to a node * * If the shutdown-lock cluster property is set, resources will not be recovered * on a different node if cleanly stopped, and may start only on that same node. * This function checks whether that applies to a given action, so that the * transition graph can be marked appropriately. * * \param[in] action Action to check * * \return true if \p action locks its resource to the action's node, * otherwise false */ bool pcmk__action_locks_rsc_to_node(const pe_action_t *action) { // Only resource actions taking place on resource's lock node are locked if ((action == NULL) || (action->rsc == NULL) || (action->rsc->lock_node == NULL) || (action->node == NULL) || (action->node->details != action->rsc->lock_node->details)) { return false; } /* During shutdown, only stops are locked (otherwise, another action such as * a demote would cause the controller to clear the lock) */ if (action->node->details->shutdown && (action->task != NULL) && (strcmp(action->task, RSC_STOP) != 0)) { return false; } return true; } /* lowest to highest */ static gint sort_action_id(gconstpointer a, gconstpointer b) { const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a; const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b; if (a == NULL) { return 1; } if (b == NULL) { return -1; } if (action_wrapper1->action->id < action_wrapper2->action->id) { return 1; } if (action_wrapper1->action->id > action_wrapper2->action->id) { return -1; } return 0; } /*! 
* \internal * \brief Remove any duplicate action inputs, merging action flags * * \param[in] action Action whose inputs should be checked */ void pcmk__deduplicate_action_inputs(pe_action_t *action) { GList *item = NULL; GList *next = NULL; pe_action_wrapper_t *last_input = NULL; action->actions_before = g_list_sort(action->actions_before, sort_action_id); for (item = action->actions_before; item != NULL; item = next) { pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data; next = item->next; if ((last_input != NULL) && (input->action->id == last_input->action->id)) { crm_trace("Input %s (%d) duplicate skipped for action %s (%d)", input->action->uuid, input->action->id, action->uuid, action->id); /* For the purposes of scheduling, the ordering flags no longer * matter, but crm_simulate looks at certain ones when creating a * dot graph. Combining the flags is sufficient for that purpose. */ last_input->type |= input->type; if (input->state == pe_link_dumped) { last_input->state = pe_link_dumped; } free(item->data); action->actions_before = g_list_delete_link(action->actions_before, item); } else { last_input = input; input->state = pe_link_not_dumped; } } } /*! * \internal * \brief Output all scheduled actions * * \param[in] data_set Cluster working set */ void pcmk__output_actions(pe_working_set_t *data_set) { pcmk__output_t *out = data_set->priv; // Output node (non-resource) actions for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) { char *node_name = NULL; char *task = NULL; pe_action_t *action = (pe_action_t *) iter->data; if (action->rsc != NULL) { continue; // Resource actions will be output later } else if (pcmk_is_set(action->flags, pe_action_optional)) { continue; // This action was not scheduled } if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) { task = strdup("Shutdown"); } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) { const char *op = g_hash_table_lookup(action->meta, "stonith_action"); task = crm_strdup_printf("Fence (%s)", op); } else { continue; // Don't display other node action types } if (pe__is_guest_node(action->node)) { node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id); } else if (action->node != NULL) { node_name = crm_strdup_printf("%s", action->node->details->uname); } out->message(out, "node-action", task, node_name, action->reason); free(node_name); free(task); } // Output resource actions for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; rsc->cmds->output_actions(rsc); } } /*! 
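The deduplication above boils down to: sort inputs by id, then drop adjacent duplicates while OR-ing their flags into the surviving entry. A minimal sketch with hypothetical demo_* types (not the scheduler's wrapper structures):

#include <glib.h>
#include <stdio.h>

struct demo_input { int id; unsigned int flags; };

static gint demo_cmp_input(gconstpointer a, gconstpointer b)
{
    return ((const struct demo_input *) a)->id
           - ((const struct demo_input *) b)->id;
}

static GList *demo_dedup(GList *inputs)
{
    struct demo_input *last = NULL;
    GList *next = NULL;

    inputs = g_list_sort(inputs, demo_cmp_input);
    for (GList *item = inputs; item != NULL; item = next) {
        struct demo_input *in = item->data;

        next = item->next;
        if ((last != NULL) && (in->id == last->id)) {
            last->flags |= in->flags;           // keep the union of flags
            inputs = g_list_delete_link(inputs, item);
        } else {
            last = in;
        }
    }
    return inputs;
}

int main(void)
{
    struct demo_input a = { 1, 0x1 }, b = { 1, 0x4 }, c = { 2, 0x2 };
    GList *inputs = g_list_prepend(g_list_prepend(g_list_prepend(NULL, &a), &b), &c);

    inputs = demo_dedup(inputs);
    for (GList *iter = inputs; iter != NULL; iter = iter->next) {
        struct demo_input *in = iter->data;

        printf("id=%d flags=0x%x\n", in->id, in->flags); // id=1 flags=0x5, id=2 flags=0x2
    }
    g_list_free(inputs);
    return 0;
}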
* \internal * \brief Schedule cancellation of a recurring action * * \param[in] rsc Resource that action is for * \param[in] call_id Action's call ID from history * \param[in] task Action name * \param[in] interval_ms Action interval * \param[in] node Node that history entry is for * \param[in] reason Short description of why action is being cancelled */ static void schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task, guint interval_ms, pe_node_t *node, const char *reason) { pe_action_t *cancel = NULL; CRM_CHECK((rsc != NULL) && (task != NULL) && (node != NULL) && (reason != NULL), return); crm_info("Recurring %s-interval %s for %s will be stopped on %s: %s", pcmk__readable_interval(interval_ms), task, rsc->id, pcmk__s(node->details->uname, "unknown node"), reason); cancel = pcmk__new_cancel_action(rsc, task, interval_ms, node); add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id); // Cancellations happen after stops pcmk__new_ordering(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, rsc->cluster); } /*! * \internal * \brief Check whether action from resource history is still in configuration * * \param[in] rsc Resource that action is for * \param[in] task Action's name * \param[in] interval_ms Action's interval (in milliseconds) * * \return true if action is still in resource configuration, otherwise false */ static bool action_in_config(pe_resource_t *rsc, const char *task, guint interval_ms) { char *key = pcmk__op_key(rsc->id, task, interval_ms); bool config = (find_rsc_op_entry(rsc, key) != NULL); free(key); return config; } /*! * \internal * \brief Get action name needed to compare digest for configuration changes * * \param[in] task Action name from history * \param[in] interval_ms Action interval (in milliseconds) * * \return Action name whose digest should be compared */ static const char * task_for_digest(const char *task, guint interval_ms) { /* Certain actions need to be compared against the parameters used to start * the resource. */ if ((interval_ms == 0) && pcmk__str_any_of(task, RSC_STATUS, RSC_MIGRATED, RSC_PROMOTE, NULL)) { task = RSC_START; } return task; } /*! * \internal * \brief Check whether only sanitized parameters to an action changed * * When collecting CIB files for troubleshooting, crm_report will mask * sensitive resource parameters. If simulations were run using that, affected * resources would appear to need a restart, which would complicate * troubleshooting. To avoid that, we save a "secure digest" of non-sensitive * parameters. This function used that digest to check whether only masked * parameters are different. * * \param[in] xml_op Resource history entry with secure digest * \param[in] digest_data Operation digest information being compared * \param[in] data_set Cluster working set * * \return true if only sanitized parameters changed, otherwise false */ static bool only_sanitized_changed(xmlNode *xml_op, const op_digest_cache_t *digest_data, pe_working_set_t *data_set) { const char *digest_secure = NULL; if (!pcmk_is_set(data_set->flags, pe_flag_sanitized)) { // The scheduler is not being run as a simulation return false; } digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST); return (digest_data->rc != RSC_DIGEST_MATCH) && (digest_secure != NULL) && (digest_data->digest_secure_calc != NULL) && (strcmp(digest_data->digest_secure_calc, digest_secure) == 0); } /*! 
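The secure-digest check above reduces to a small predicate: the full digest no longer matches, but the digest of non-sensitive parameters still does. A plain-C sketch over strings (hypothetical names, not the op_digest_cache_t API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

// If the full digest differs but the digest of non-sensitive parameters
// still matches, only masked (sanitized) parameters changed.
static bool demo_only_sanitized_changed(bool full_digest_matches,
                                        const char *recorded_secure_digest,
                                        const char *calculated_secure_digest)
{
    return !full_digest_matches
           && (recorded_secure_digest != NULL)
           && (calculated_secure_digest != NULL)
           && (strcmp(recorded_secure_digest, calculated_secure_digest) == 0);
}

int main(void)
{
    // A masked password changed, but the non-sensitive parameters did not.
    printf("%s\n", demo_only_sanitized_changed(false, "abc123", "abc123")
                   ? "only private parameters differ" : "real change");
    return 0;
}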
* \internal * \brief Force a restart due to a configuration change * * \param[in] rsc Resource that action is for * \param[in] task Name of action whose configuration changed * \param[in] interval_ms Action interval (in milliseconds) * \param[in] node Node where resource should be restarted */ static void force_restart(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node) { char *key = pcmk__op_key(rsc->id, task, interval_ms); pe_action_t *required = custom_action(rsc, key, task, NULL, FALSE, TRUE, rsc->cluster); pe_action_set_reason(required, "resource definition change", true); trigger_unfencing(rsc, node, "Device parameters changed", NULL, rsc->cluster); } /*! * \internal * \brief Reschedule a recurring action * * \param[in] rsc Resource that action is for * \param[in] task Name of action being rescheduled * \param[in] interval_ms Action interval (in milliseconds) * \param[in] node Node where action should be rescheduled */ static void reschedule_recurring(pe_resource_t *rsc, const char *task, guint interval_ms, pe_node_t *node) { pe_action_t *op = NULL; trigger_unfencing(rsc, node, "Device parameters changed (reschedule)", NULL, rsc->cluster); op = custom_action(rsc, pcmk__op_key(rsc->id, task, interval_ms), task, node, TRUE, TRUE, rsc->cluster); pe__set_action_flags(op, pe_action_reschedule); } /*! * \internal * \brief Schedule a reload of a resource on a node * * \param[in] rsc Resource to reload * \param[in] node Where resource should be reloaded */ static void schedule_reload(pe_resource_t *rsc, pe_node_t *node) { pe_action_t *reload = NULL; // For collective resources, just call recursively for children if (rsc->variant > pe_native) { g_list_foreach(rsc->children, (GFunc) schedule_reload, node); return; } // Skip the reload in certain situations if ((node == NULL) || !pcmk_is_set(rsc->flags, pe_rsc_managed) || pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_trace(rsc, "Skip reload of %s:%s%s %s", rsc->id, pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " unmanaged", pcmk_is_set(rsc->flags, pe_rsc_failed)? " failed" : "", (node == NULL)? "inactive" : node->details->uname); return; } /* If a resource's configuration changed while a start was pending, * force a full restart instead of a reload. */ if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) { pe_rsc_trace(rsc, "%s: preventing agent reload because start pending", rsc->id); custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE, rsc->cluster); return; } // Schedule the reload pe__set_resource_flags(rsc, pe_rsc_reload); reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node, FALSE, TRUE, rsc->cluster); pe_action_set_reason(reload, "resource definition change", FALSE); // Set orderings so that a required stop or demote cancels the reload pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL, pe_order_optional|pe_order_then_cancels_first, rsc->cluster); pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL, pe_order_optional|pe_order_then_cancels_first, rsc->cluster); } /*! * \internal * \brief Handle any configuration change for an action * * Given an action from resource history, if the resource's configuration * changed since the action was done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, etc.). 
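As a rough decision table for the digest handling this function performs (an assumed simplification, not the exact scheduler logic): restart-sensitive changes force a restart; otherwise recurring actions are rescheduled, and non-recurring ones are reloaded only if the agent advertises reload support.

#include <stdbool.h>
#include <stdio.h>

enum demo_change { DEMO_CHANGE_RESTART, DEMO_CHANGE_OTHER };
enum demo_reaction { DEMO_RESTART, DEMO_RESCHEDULE, DEMO_RELOAD };

static enum demo_reaction demo_react(enum demo_change change,
                                     unsigned int interval_ms,
                                     bool agent_supports_reload)
{
    if (change == DEMO_CHANGE_RESTART) {
        return DEMO_RESTART;
    }
    if (interval_ms > 0) {
        return DEMO_RESCHEDULE; // recurring ops are just rescheduled
    }
    return agent_supports_reload ? DEMO_RELOAD : DEMO_RESTART;
}

int main(void)
{
    static const char *names[] = { "restart", "reschedule", "reload" };

    printf("changed monitor:   %s\n", names[demo_react(DEMO_CHANGE_OTHER, 10000, true)]);
    printf("changed start:     %s\n", names[demo_react(DEMO_CHANGE_OTHER, 0, true)]);
    printf("restart parameter: %s\n", names[demo_react(DEMO_CHANGE_RESTART, 0, true)]);
    return 0;
}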
* * \param[in] rsc Resource that action is for * \param[in] node Node that action was on * \param[in] xml_op Action XML from resource history * * \return true if action configuration changed, otherwise false */ bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op) { guint interval_ms = 0; const char *task = NULL; const op_digest_cache_t *digest_data = NULL; CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL), return false); task = crm_element_value(xml_op, XML_LRM_ATTR_TASK); CRM_CHECK(task != NULL, return false); crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); // If this is a recurring action, check whether it has been orphaned if (interval_ms > 0) { if (action_in_config(rsc, task, interval_ms)) { pe_rsc_trace(rsc, "%s-interval %s for %s on %s is in configuration", pcmk__readable_interval(interval_ms), task, rsc->id, node->details->uname); } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_action_orphans)) { schedule_cancel(rsc, crm_element_value(xml_op, XML_LRM_ATTR_CALLID), task, interval_ms, node, "orphan"); return true; } else { pe_rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned", pcmk__readable_interval(interval_ms), task, rsc->id, node->details->uname); return true; } } crm_trace("Checking %s-interval %s for %s on %s for configuration changes", pcmk__readable_interval(interval_ms), task, rsc->id, node->details->uname); task = task_for_digest(task, interval_ms); digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->cluster); if (only_sanitized_changed(xml_op, digest_data, rsc->cluster)) { if (!pcmk__is_daemon && (rsc->cluster->priv != NULL)) { pcmk__output_t *out = rsc->cluster->priv; out->info(out, "Only 'private' parameters to %s-interval %s for %s " "on %s changed: %s", pcmk__readable_interval(interval_ms), task, rsc->id, node->details->uname, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC)); } return false; } switch (digest_data->rc) { case RSC_DIGEST_RESTART: crm_log_xml_debug(digest_data->params_restart, "params:restart"); force_restart(rsc, task, interval_ms, node); return true; case RSC_DIGEST_ALL: case RSC_DIGEST_UNKNOWN: // Changes that can potentially be handled by an agent reload if (interval_ms > 0) { /* Recurring actions aren't reloaded per se, they are just * re-scheduled so the next run uses the new parameters. * The old instance will be cancelled automatically. */ crm_log_xml_debug(digest_data->params_all, "params:reschedule"); reschedule_recurring(rsc, task, interval_ms, node); } else if (crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) { // Agent supports reload, so use it trigger_unfencing(rsc, node, "Device parameters changed (reload)", NULL, rsc->cluster); crm_log_xml_debug(digest_data->params_all, "params:reload"); schedule_reload(rsc, node); } else { pe_rsc_trace(rsc, "Restarting %s because agent doesn't support reload", rsc->id); crm_log_xml_debug(digest_data->params_restart, "params:restart"); force_restart(rsc, task, interval_ms, node); } return true; default: break; } return false; } /*! 
* \internal * \brief Create a list of resource's action history entries, sorted by call ID * * \param[in] rsc Resource whose history should be checked * \param[in] rsc_entry Resource's status XML * \param[out] start_index Where to store index of start-like action, if any * \param[out] stop_index Where to store index of stop action, if any */ static GList * rsc_history_as_list(pe_resource_t *rsc, xmlNode *rsc_entry, int *start_index, int *stop_index) { GList *ops = NULL; for (xmlNode *rsc_op = first_named_child(rsc_entry, XML_LRM_TAG_RSC_OP); rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) { ops = g_list_prepend(ops, rsc_op); } ops = g_list_sort(ops, sort_op_by_callid); calculate_active_ops(ops, start_index, stop_index); return ops; } /*! * \internal * \brief Process a resource's action history from the CIB status * * Given a resource's action history, if the resource's configuration * changed since the actions were done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, clean-up, etc.). * (This also cancels recurring actions for maintenance mode, which is not * entirely related but convenient to do here.) * * \param[in] rsc_entry Resource's status XML * \param[in] rsc Resource whose history is being processed * \param[in] node Node whose history is being processed */ static void process_rsc_history(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node) { int offset = -1; int stop_index = 0; int start_index = 0; GList *sorted_op_list = NULL; if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { if (pe_rsc_is_anon_clone(uber_parent(rsc))) { pe_rsc_trace(rsc, "Skipping configuration check " "for orphaned clone instance %s", rsc->id); } else { pe_rsc_trace(rsc, "Skipping configuration check and scheduling clean-up " "for orphaned resource %s", rsc->id); DeleteRsc(rsc, node, FALSE, rsc->cluster); } return; } if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) { if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) { DeleteRsc(rsc, node, FALSE, rsc->cluster); } pe_rsc_trace(rsc, "Skipping configuration check for %s " "because no longer active on %s", rsc->id, node->details->uname); return; } pe_rsc_trace(rsc, "Checking for configuration changes for %s on %s", rsc->id, node->details->uname); if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) { DeleteRsc(rsc, node, FALSE, rsc->cluster); } sorted_op_list = rsc_history_as_list(rsc, rsc_entry, &start_index, &stop_index); if (start_index < stop_index) { return; // Resource is stopped } for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) { xmlNode *rsc_op = (xmlNode *) iter->data; const char *task = NULL; guint interval_ms = 0; if (++offset < start_index) { // Skip actions that happened before a start continue; } task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms); if ((interval_ms > 0) && (pcmk_is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { // Maintenance mode cancels recurring operations schedule_cancel(rsc, crm_element_value(rsc_op, XML_LRM_ATTR_CALLID), task, interval_ms, node, "maintenance mode"); } else if ((interval_ms > 0) || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START, RSC_PROMOTE, RSC_MIGRATED, NULL)) { /* If a resource operation failed, and the operation's definition * has changed, clear any fail count so they can be retried fresh. 
*/ if (pe__bundle_needs_remote_name(rsc, rsc->cluster)) { /* We haven't allocated resources to nodes yet, so if the * REMOTE_CONTAINER_HACK is used, we may calculate the digest * based on the literal "#uname" value rather than the properly * substituted value. That would mistakenly make the action * definition appear to have been changed. Defer the check until * later in this case. */ pe__add_param_check(rsc_op, rsc, node, pe_check_active, rsc->cluster); } else if (pcmk__check_action_config(rsc, node, rsc_op) && (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, rsc->cluster) != 0)) { pe__clear_failcount(rsc, node, "action definition changed", rsc->cluster); } } } g_list_free(sorted_op_list); } /*! * \internal * \brief Process a node's action history from the CIB status * * Given a node's resource history, if the resource's configuration changed * since the actions were done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, clean-up, etc.). * (This also cancels recurring actions for maintenance mode, which is not * entirely related but convenient to do here.) * * \param[in] node Node whose history is being processed * \param[in] lrm_rscs Node's lrm_resources XML from CIB status * \param[in] data_set Cluster working set */ static void process_node_history(pe_node_t *node, xmlNode *lrm_rscs, pe_working_set_t *data_set) { crm_trace("Processing history for node %s", node->details->uname); for (xmlNode *rsc_entry = first_named_child(lrm_rscs, XML_LRM_TAG_RESOURCE); rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) { if (xml_has_children(rsc_entry)) { GList *result = pcmk__rscs_matching_id(ID(rsc_entry), data_set); for (GList *iter = result; iter != NULL; iter = iter->next) { pe_resource_t *rsc = (pe_resource_t *) iter->data; if (rsc->variant == pe_native) { process_rsc_history(rsc_entry, rsc, node); } } g_list_free(result); } } } // XPath to find a node's resource history #define XPATH_NODE_HISTORY "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \ "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \ "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES /*! * \internal * \brief Process any resource configuration changes in the CIB status * * Go through all nodes' resource history, and if a resource's configuration * changed since its actions were done, schedule any actions needed (restart, * reload, unfencing, rescheduling recurring actions, clean-up, etc.). * (This also cancels recurring actions for maintenance mode, which is not * entirely related but convenient to do here.) * * \param[in] data_set Cluster working set */ void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set) { crm_trace("Check resource and action configuration for changes"); /* Rather than iterate through the status section, iterate through the nodes * and search for the appropriate status subsection for each. This skips * orphaned nodes and lets us eliminate some cases before searching the XML. */ for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; /* Don't bother checking actions for a node that can't run actions ... * unless it's in maintenance mode, in which case we still need to * cancel any existing recurring monitors. 
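For illustration, the per-node history XPath built from the macro above looks like the string below once formatted; the element and attribute names are written out literally here and assumed to match the CIB schema, whereas the real macro composes them from XML_* constants.

#include <stdio.h>

// Assumed literal form of the per-node history XPath.
#define DEMO_XPATH_NODE_HISTORY \
    "/cib/status/node_state[@uname='%s']/lrm/lrm_resources"

int main(void)
{
    char xpath[256];

    snprintf(xpath, sizeof(xpath), DEMO_XPATH_NODE_HISTORY, "node1");
    printf("%s\n", xpath);
    // /cib/status/node_state[@uname='node1']/lrm/lrm_resources
    return 0;
}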
*/ if (node->details->maintenance || pcmk__node_available(node, false, false)) { char *xpath = NULL; xmlNode *history = NULL; xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname); history = get_xpath_object(xpath, data_set->input, LOG_NEVER); free(xpath); process_node_history(node, history, data_set); } } } diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c index f1e3022a3c..39aae9708f 100644 --- a/lib/pacemaker/pcmk_sched_bundle.c +++ b/lib/pacemaker/pcmk_sched_bundle.c @@ -1,1128 +1,1128 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include "libpacemaker_private.h" #define PE__VARIANT_BUNDLE 1 #include static bool is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node) { for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (node->details == replica->node->details) { return TRUE; } } return FALSE; } void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes, int max, int per_host_max, pe_working_set_t * data_set); static GList * get_container_list(pe_resource_t *rsc) { GList *containers = NULL; if (rsc->variant == pe_container) { pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, rsc); for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; containers = g_list_append(containers, replica->container); } } return containers; } static inline GList * get_containers_or_children(pe_resource_t *rsc) { return (rsc->variant == pe_container)? 
get_container_list(rsc) : rsc->children; } pe_node_t * pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer, pe_working_set_t *data_set) { GList *containers = NULL; GList *nodes = NULL; pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return NULL); get_bundle_variant_data(bundle_data, rsc); pe__set_resource_flags(rsc, pe_rsc_allocating); containers = get_container_list(rsc); pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, data_set); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = pcmk__sort_nodes(nodes, NULL, data_set); containers = g_list_sort(containers, pcmk__cmp_instance); distribute_children(rsc, containers, nodes, bundle_data->nreplicas, bundle_data->nreplicas_per_host, data_set); g_list_free(nodes); g_list_free(containers); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; pe_node_t *container_host = NULL; CRM_ASSERT(replica); if (replica->ip) { pe_rsc_trace(rsc, "Allocating bundle %s IP %s", rsc->id, replica->ip->id); replica->ip->cmds->allocate(replica->ip, prefer, data_set); } container_host = replica->container->allocated_to; if (replica->remote && pe__is_guest_or_remote_node(container_host)) { /* We need 'nested' connection resources to be on the same * host because pacemaker-remoted only supports a single * active connection */ pcmk__new_colocation("child-remote-with-docker-remote", NULL, INFINITY, replica->remote, container_host->details->remote_rsc, NULL, NULL, true, data_set); } if (replica->remote) { pe_rsc_trace(rsc, "Allocating bundle %s connection %s", rsc->id, replica->remote->id); replica->remote->cmds->allocate(replica->remote, prefer, data_set); } // Explicitly allocate replicas' children before bundle child if (replica->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, replica->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if (node->details != replica->node->details) { node->weight = -INFINITY; } else if (!pcmk__threshold_reached(replica->child, node, NULL)) { node->weight = INFINITY; } } pe__set_resource_flags(replica->child->parent, pe_rsc_allocating); pe_rsc_trace(rsc, "Allocating bundle %s replica child %s", rsc->id, replica->child->id); replica->child->cmds->allocate(replica->child, replica->node, data_set); pe__clear_resource_flags(replica->child->parent, pe_rsc_allocating); } } if (bundle_data->child) { pe_node_t *node = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { if (is_bundle_node(bundle_data, node)) { node->weight = 0; } else { node->weight = -INFINITY; } } pe_rsc_trace(rsc, "Allocating bundle %s child %s", rsc->id, bundle_data->child->id); bundle_data->child->cmds->allocate(bundle_data->child, prefer, data_set); } pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional); return NULL; } void pcmk__bundle_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set) { pe_action_t *action = NULL; GList *containers = NULL; pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); containers = get_container_list(rsc); get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (replica->ip) { 
replica->ip->cmds->create_actions(replica->ip, data_set); } if (replica->container) { replica->container->cmds->create_actions(replica->container, data_set); } if (replica->remote) { replica->remote->cmds->create_actions(replica->remote, data_set); } } clone_create_pseudo_actions(rsc, containers, NULL, NULL, data_set); if (bundle_data->child) { bundle_data->child->cmds->create_actions(bundle_data->child, data_set); if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) { /* promote */ - pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true); - action = pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true); + pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true); + action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true); action->priority = INFINITY; /* demote */ - pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true); - action = pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true); + pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true); + action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true); action->priority = INFINITY; } } g_list_free(containers); } void pcmk__bundle_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); if (bundle_data->child) { pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child, RSC_START, pe_order_implies_first_printed, data_set); pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child, RSC_STOP, pe_order_implies_first_printed, data_set); if (bundle_data->child->children) { pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } else { pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); } } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); CRM_ASSERT(replica->container); replica->container->cmds->internal_constraints(replica->container, data_set); pcmk__order_starts(rsc, replica->container, pe_order_runnable_left|pe_order_implies_first_printed, data_set); if (replica->child) { pcmk__order_stops(rsc, replica->child, pe_order_implies_first_printed, data_set); } pcmk__order_stops(rsc, replica->container, pe_order_implies_first_printed, data_set); pcmk__order_resource_actions(replica->container, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); pcmk__order_resource_actions(replica->container, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if (replica->ip) { replica->ip->cmds->internal_constraints(replica->ip, data_set); // Start IP then container pcmk__order_starts(replica->ip, replica->container, pe_order_runnable_left|pe_order_preserve, data_set); pcmk__order_stops(replica->container, replica->ip, pe_order_implies_first|pe_order_preserve, data_set); pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip, replica->container, NULL, NULL, true, data_set); } if (replica->remote) { /* This handles ordering and colocating remote relative to container * (via "resource-with-container"). 
Since IP is also ordered and * colocated relative to the container, we don't need to do anything * explicit here with IP. */ replica->remote->cmds->internal_constraints(replica->remote, data_set); } if (replica->child) { CRM_ASSERT(replica->remote); // "Start remote then child" is implicit in scheduler's remote logic } } if (bundle_data->child) { bundle_data->child->cmds->internal_constraints(bundle_data->child, data_set); if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) { promote_demote_constraints(rsc, data_set); /* child demoted before global demoted */ pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set); /* global demote before child demote */ pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child, RSC_DEMOTE, pe_order_implies_first_printed, data_set); /* child promoted before global promoted */ pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc, RSC_PROMOTED, pe_order_implies_then_printed, data_set); /* global promote before child promote */ pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child, RSC_PROMOTE, pe_order_implies_first_printed, data_set); } } } static pe_resource_t * compatible_replica_for_node(pe_resource_t *rsc_lh, pe_node_t *candidate, pe_resource_t *rsc, enum rsc_role_e filter, gboolean current) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(candidate != NULL, return NULL); get_bundle_variant_data(bundle_data, rsc); crm_trace("Looking for compatible child from %s for %s on %s", rsc_lh->id, rsc->id, candidate->details->uname); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (is_child_compatible(replica->container, candidate, filter, current)) { crm_trace("Pairing %s with %s on %s", rsc_lh->id, replica->container->id, candidate->details->uname); return replica->container; } } crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id); return NULL; } static pe_resource_t * compatible_replica(pe_resource_t *rsc_lh, pe_resource_t *rsc, enum rsc_role_e filter, gboolean current, pe_working_set_t *data_set) { GList *scratch = NULL; pe_resource_t *pair = NULL; pe_node_t *active_node_lh = NULL; active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current); if (active_node_lh) { return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter, current); } scratch = g_hash_table_get_values(rsc_lh->allowed_nodes); scratch = pcmk__sort_nodes(scratch, NULL, data_set); for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none")); done: g_list_free(scratch); return pair; } void pcmk__bundle_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ CRM_ASSERT(FALSE); } int copies_per_node(pe_resource_t * rsc) { /* Strictly speaking, there should be a 'copies_per_node' addition * to the resource function table and each case would be a * function. However that would be serious overkill to return an * int. 
In fact, it seems to me that both function tables * could/should be replaced by resources.{c,h} full of * rsc_{some_operation} functions containing a switch as below * which calls out to functions named {variant}_{some_operation} * as needed. */ switch(rsc->variant) { case pe_unknown: return 0; case pe_native: case pe_group: return 1; case pe_clone: { const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX); if (max_clones_node == NULL) { return 1; } else { int max_i; pcmk__scan_min_int(max_clones_node, &max_i, 0); return max_i; } } case pe_container: { pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, rsc); return data->nreplicas_per_host; } } return 0; } void pcmk__bundle_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { GList *allocated_primaries = NULL; pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(dependent != NULL, pe_err("dependent was NULL for %s", constraint->id); return); CRM_CHECK(primary != NULL, pe_err("primary was NULL for %s", constraint->id); return); CRM_ASSERT(dependent->variant == pe_native); if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { pe_rsc_trace(primary, "%s is still provisional", primary->id); return; } else if(constraint->dependent->variant > pe_group) { pe_resource_t *primary_replica = compatible_replica(dependent, primary, RSC_ROLE_UNKNOWN, FALSE, data_set); if (primary_replica) { pe_rsc_debug(primary, "Pairing %s with %s", dependent->id, primary_replica->id); dependent->cmds->rsc_colocation_lh(dependent, primary_replica, constraint, data_set); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", dependent->id, primary->id); pcmk__assign_resource(dependent, NULL, true); } else { pe_rsc_debug(primary, "Cannot pair %s with instance of %s", dependent->id, primary->id); } return; } get_bundle_variant_data(bundle_data, primary); pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d", constraint->id, dependent->id, primary->id, constraint->score); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (constraint->score < INFINITY) { replica->container->cmds->rsc_colocation_rh(dependent, replica->container, constraint, data_set); } else { pe_node_t *chosen = replica->container->fns->location(replica->container, NULL, FALSE); if ((chosen == NULL) || is_set_recursive(replica->container, pe_rsc_block, TRUE)) { continue; } if ((constraint->primary_role >= RSC_ROLE_PROMOTED) && (replica->child == NULL)) { continue; } if ((constraint->primary_role >= RSC_ROLE_PROMOTED) && (replica->child->next_role < RSC_ROLE_PROMOTED)) { continue; } pe_rsc_trace(primary, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); allocated_primaries = g_list_prepend(allocated_primaries, chosen); } } if (constraint->score >= INFINITY) { node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE); } g_list_free(allocated_primaries); } enum pe_action_flags pcmk__bundle_action_flags(pe_action_t *action, pe_node_t *node) { GList *containers = NULL; enum pe_action_flags flags = 0; pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, action->rsc); if(data->child) { enum action_tasks task = get_complex_task(data->child, action->task, TRUE); switch(task) { case no_action: case action_notify: case action_notified: case action_promote: case 
action_promoted: case action_demote: case action_demoted: return summary_action_flags(action, data->child->children, node); default: break; } } containers = get_container_list(action->rsc); flags = summary_action_flags(action, containers, node); g_list_free(containers); return flags; } pe_resource_t * find_compatible_child_by_node(pe_resource_t * local_child, pe_node_t * local_node, pe_resource_t * rsc, enum rsc_role_e filter, gboolean current) { GList *gIter = NULL; GList *children = NULL; if (local_node == NULL) { crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id); return NULL; } crm_trace("Looking for compatible child from %s for %s on %s", local_child->id, rsc->id, local_node->details->uname); children = get_containers_or_children(rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; if(is_child_compatible(child_rsc, local_node, filter, current)) { crm_trace("Pairing %s with %s on %s", local_child->id, child_rsc->id, local_node->details->uname); return child_rsc; } } crm_trace("Can't pair %s with %s", local_child->id, rsc->id); if(children != rsc->children) { g_list_free(children); } return NULL; } static pe__bundle_replica_t * replica_for_container(pe_resource_t *rsc, pe_resource_t *container, pe_node_t *node) { if (rsc->variant == pe_container) { pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, rsc); for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->child && (container == replica->container) && (node->details == replica->node->details)) { return replica; } } } return NULL; } static enum pe_graph_flags multi_update_interleave_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set) { GList *gIter = NULL; GList *children = NULL; gboolean current = FALSE; enum pe_graph_flags changed = pe_graph_none; /* Fix this - lazy */ if (pcmk__ends_with(first->uuid, "_stopped_0") || pcmk__ends_with(first->uuid, "_demoted_0")) { current = TRUE; } children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *then_child = gIter->data; pe_resource_t *first_child = find_compatible_child(then_child, first->rsc, RSC_ROLE_UNKNOWN, current, data_set); if (first_child == NULL && current) { crm_trace("Ignore"); } else if (first_child == NULL) { crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid); /* Me no like this hack - but what else can we do? 
* * If there is no-one active or about to be active * on the same node as then_child, then they must * not be allowed to start */ if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) { pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id); if (pcmk__assign_resource(then_child, NULL, true)) { pe__set_graph_flags(changed, first, pe_graph_updated_then); } } } else { pe_action_t *first_action = NULL; pe_action_t *then_action = NULL; enum action_tasks task = clone_child_action(first); const char *first_task = task2text(task); pe__bundle_replica_t *first_replica = NULL; pe__bundle_replica_t *then_replica = NULL; first_replica = replica_for_container(first->rsc, first_child, node); if (strstr(first->task, "stop") && first_replica && first_replica->child) { /* Except for 'stopped' we should be looking at the * in-container resource, actions for the child will * happen later and are therefor more likely to align * with the user's intent. */ first_action = find_first_action(first_replica->child->actions, NULL, task2text(task), node); } else { first_action = find_first_action(first_child->actions, NULL, task2text(task), node); } then_replica = replica_for_container(then->rsc, then_child, node); if (strstr(then->task, "mote") && then_replica && then_replica->child) { /* Promote/demote actions will never be found for the * container resource, look in the child instead * * Alternatively treat: * 'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and * 'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY' */ then_action = find_first_action(then_replica->child->actions, NULL, then->task, node); } else { then_action = find_first_action(then_child->actions, NULL, then->task, node); } if (first_action == NULL) { if (!pcmk_is_set(first_child->flags, pe_rsc_orphan) && !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) { crm_err("Internal error: No action found for %s in %s (first)", first_task, first_child->id); } else { crm_trace("No action found for %s in %s%s (first)", first_task, first_child->id, pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : ""); } continue; } /* We're only interested if 'then' is neither stopping nor being demoted */ if (then_action == NULL) { if (!pcmk_is_set(then_child->flags, pe_rsc_orphan) && !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) { crm_err("Internal error: No action found for %s in %s (then)", then->task, then_child->id); } else { crm_trace("No action found for %s in %s%s (then)", then->task, then_child->id, pcmk_is_set(then_child->flags, pe_rsc_orphan)? 
" (ORPHAN)" : ""); } continue; } if (order_actions(first_action, then_action, type)) { crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x", first_action->uuid, pcmk_is_set(first_action->flags, pe_action_optional), then_action->uuid, pcmk_is_set(then_action->flags, pe_action_optional), type); pe__set_graph_flags(changed, first, pe_graph_updated_first|pe_graph_updated_then); } if(first_action && then_action) { changed |= then_child->cmds->update_actions(first_action, then_action, node, first_child->cmds->action_flags(first_action, node), filter, type, data_set); } else { crm_err("Nothing found either for %s (%p) or %s (%p) %s", first_child->id, first_action, then_child->id, then_action, task2text(task)); } } } if(children != then->rsc->children) { g_list_free(children); } return changed; } static bool can_interleave_actions(pe_action_t *first, pe_action_t *then) { bool interleave = FALSE; pe_resource_t *rsc = NULL; const char *interleave_s = NULL; if(first->rsc == NULL || then->rsc == NULL) { crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc == then->rsc) { crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid); return FALSE; } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) { crm_trace("Not interleaving %s with %s (both sides must be clones or bundles)", first->uuid, then->uuid); return FALSE; } if (pcmk__ends_with(then->uuid, "_stop_0") || pcmk__ends_with(then->uuid, "_demote_0")) { rsc = first->rsc; } else { rsc = then->rsc; } interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE); interleave = crm_is_true(interleave_s); crm_trace("Interleave %s -> %s: %s (based on %s)", first->uuid, then->uuid, interleave ? 
"yes" : "no", rsc->id); return interleave; } enum pe_graph_flags pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set) { enum pe_graph_flags changed = pe_graph_none; crm_trace("%s -> %s", first->uuid, then->uuid); if(can_interleave_actions(first, then)) { changed = multi_update_interleave_actions(first, then, node, flags, filter, type, data_set); } else if(then->rsc) { GList *gIter = NULL; GList *children = NULL; // Handle the 'primitive' ordering case changed |= native_update_actions(first, then, node, flags, filter, type, data_set); // Now any children (or containers in the case of a bundle) children = get_containers_or_children(then->rsc); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *then_child = (pe_resource_t *) gIter->data; enum pe_graph_flags then_child_changed = pe_graph_none; pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node); if (then_child_action) { enum pe_action_flags then_child_flags = then_child->cmds->action_flags(then_child_action, node); if (pcmk_is_set(then_child_flags, pe_action_runnable)) { then_child_changed |= then_child->cmds->update_actions(first, then_child_action, node, flags, filter, type, data_set); } changed |= then_child_changed; if (then_child_changed & pe_graph_updated_then) { for (GList *lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) { pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data; pcmk__update_action_for_orderings(next->action, data_set); } } } } if(children != then->rsc->children) { g_list_free(children); } } return changed; } void pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); pcmk__apply_location(constraint, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; if (replica->container) { replica->container->cmds->rsc_location(replica->container, constraint); } if (replica->ip) { replica->ip->cmds->rsc_location(replica->ip, constraint); } } if (bundle_data->child && ((constraint->role_filter == RSC_ROLE_UNPROMOTED) || (constraint->role_filter == RSC_ROLE_PROMOTED))) { bundle_data->child->cmds->rsc_location(bundle_data->child, constraint); bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location, constraint); } } void pcmk__bundle_expand(pe_resource_t *rsc, pe_working_set_t * data_set) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); if (bundle_data->child) { bundle_data->child->cmds->expand(bundle_data->child, data_set); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (replica->remote && replica->container && pe__bundle_needs_remote_name(replica->remote, data_set)) { /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that * run pacemaker-remoted inside, without needing a separate IP for * the container. This is done by configuring the inner remote's * connection host as the magic string "#uname", then * replacing it with the underlying host when needed. 
*/ xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']", replica->remote->xml, LOG_ERR); const char *calculated_addr = NULL; // Replace the value in replica->remote->xml (if appropriate) calculated_addr = pe__add_bundle_remote_name(replica->remote, data_set, nvpair, "value"); if (calculated_addr) { /* Since this is for the bundle as a resource, and not any * particular action, replace the value in the default * parameters (not evaluated for node). create_graph_action() * will grab it from there to replace it in node-evaluated * parameters. */ GHashTable *params = pe_rsc_params(replica->remote, NULL, data_set); crm_trace("Set address for bundle connection %s to bundle host %s", replica->remote->id, calculated_addr); g_hash_table_replace(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR), strdup(calculated_addr)); } else { /* The only way to get here is if the remote connection is * neither currently running nor scheduled to run. That means we * won't be doing any operations that require addr (only start * requires it; we additionally use it to compare digests when * unpacking status, promote, and migrate_from history, but * that's already happened by this point). */ crm_info("Unable to determine address for bundle %s remote connection", rsc->id); } } if (replica->ip) { replica->ip->cmds->expand(replica->ip, data_set); } if (replica->container) { replica->container->cmds->expand(replica->container, data_set); } if (replica->remote) { replica->remote->cmds->expand(replica->remote, data_set); } } } gboolean pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t * data_set) { bool any_created = FALSE; pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return FALSE); get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (replica->ip) { any_created |= replica->ip->cmds->create_probe(replica->ip, node, complete, force, data_set); } if (replica->child && (node->details == replica->node->details)) { any_created |= replica->child->cmds->create_probe(replica->child, node, complete, force, data_set); } if (replica->container) { bool created = replica->container->cmds->create_probe(replica->container, node, complete, force, data_set); if(created) { any_created = TRUE; /* If we're limited to one replica per host (due to * the lack of an IP range probably), then we don't * want any of our peer containers starting until * we've established that no other copies are already * running. 
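 *
 * As an illustration, with two hypothetical replicas A and B and
 * nreplicas_per_host == 1, the orderings added below look roughly like:
 *
 *     A-container_monitor_0 -> B-container_start_0
 *     B-container_monitor_0 -> A-container_start_0
 *
 * so each peer container's start is ordered (optional, same-node) after
 * this container's probe.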
* * Partly this is to ensure that nreplicas_per_host is * observed, but also to ensure that the containers * don't fail to start because the necessary port * mappings (which won't include an IP for uniqueness) * are already taken */ for (GList *tIter = bundle_data->replicas; tIter && (bundle_data->nreplicas_per_host == 1); tIter = tIter->next) { pe__bundle_replica_t *other = tIter->data; if ((other != replica) && (other != NULL) && (other->container != NULL)) { pcmk__new_ordering(replica->container, pcmk__op_key(replica->container->id, RSC_STATUS, 0), NULL, other->container, pcmk__op_key(other->container->id, RSC_START, 0), NULL, pe_order_optional|pe_order_same_node, data_set); } } } } if (replica->container && replica->remote && replica->remote->cmds->create_probe(replica->remote, node, complete, force, data_set)) { /* Do not probe the remote resource until we know where the * container is running. This is required for REMOTE_CONTAINER_HACK * to correctly probe remote resources. */ char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS, 0); pe_action_t *probe = find_first_action(replica->remote->actions, probe_uuid, NULL, node); free(probe_uuid); if (probe) { any_created = TRUE; crm_trace("Ordering %s probe on %s", replica->remote->id, node->details->uname); pcmk__new_ordering(replica->container, pcmk__op_key(replica->container->id, RSC_START, 0), NULL, replica->remote, NULL, probe, pe_order_probe, data_set); } } } return any_created; } void pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml) { } void pcmk__output_bundle_actions(pe_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pe__bundle_replica_t *replica = gIter->data; CRM_ASSERT(replica); if (replica->ip != NULL) { replica->ip->cmds->output_actions(replica->ip); } if (replica->container != NULL) { replica->container->cmds->output_actions(replica->container); } if (replica->remote != NULL) { replica->remote->cmds->output_actions(replica->remote); } if (replica->child != NULL) { replica->child->cmds->output_actions(replica->child); } } } // Bundle implementation of resource_alloc_functions_t:add_utilization() void pcmk__bundle_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { pe__bundle_variant_data_t *bundle_data = NULL; pe__bundle_replica_t *replica = NULL; if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } get_bundle_variant_data(bundle_data, rsc); if (bundle_data->replicas == NULL) { return; } /* All bundle replicas are identical, so using the utilization of the first * is sufficient for any. Only the implicit container resource can have * utilization values. */ replica = (pe__bundle_replica_t *) bundle_data->replicas->data; if (replica->container != NULL) { replica->container->cmds->add_utilization(replica->container, orig_rsc, all_rscs, utilization); } } // Bundle implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__bundle_shutdown_lock(pe_resource_t *rsc) { return; // Bundles currently don't support shutdown locks } diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c index b94a6d2ab0..85945b9581 100644 --- a/lib/pacemaker/pcmk_sched_clone.c +++ b/lib/pacemaker/pcmk_sched_clone.c @@ -1,1168 +1,1167 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include "libpacemaker_private.h" #define VARIANT_CLONE 1 #include static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all); static pe_node_t * can_run_instance(pe_resource_t * rsc, pe_node_t * node, int limit) { pe_node_t *local_node = NULL; if (node == NULL && rsc->allowed_nodes) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) { can_run_instance(rsc, local_node, limit); } return NULL; } if (!node) { /* make clang analyzer happy */ goto bail; } else if (!pcmk__node_available(node, false, false)) { goto bail; } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) { goto bail; } local_node = pcmk__top_allowed_node(rsc, node); if (local_node == NULL) { crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname); goto bail; } else if (local_node->weight < 0) { common_update_score(rsc, node->details->id, local_node->weight); pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.", rsc->id, node->details->uname); } else if (local_node->count < limit) { pe_rsc_trace(rsc, "%s can run on %s (already running %d)", rsc->id, node->details->uname, local_node->count); return local_node; } else { pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)", rsc->id, node->details->uname, local_node->count, limit); } bail: if (node) { common_update_score(rsc, node->details->id, -INFINITY); } return NULL; } static pe_node_t * allocate_instance(pe_resource_t *rsc, pe_node_t *prefer, gboolean all_coloc, int limit, pe_working_set_t *data_set) { pe_node_t *chosen = NULL; GHashTable *backup = NULL; CRM_ASSERT(rsc); pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)", rsc->id, (prefer? prefer->details->uname: "none"), (all_coloc? "all" : "some")); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return rsc->fns->location(rsc, NULL, FALSE); } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } /* Only include positive colocation preferences of dependent resources * if not every node will get a copy of the clone */ append_parent_colocation(rsc->parent, rsc, all_coloc); if (prefer) { pe_node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id); if (local_prefer == NULL || local_prefer->weight < 0) { pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id, prefer->details->uname); return NULL; } } can_run_instance(rsc, NULL, limit); backup = pcmk__copy_node_table(rsc->allowed_nodes); pe_rsc_trace(rsc, "Allocating instance %s", rsc->id); chosen = rsc->cmds->allocate(rsc, prefer, data_set); if (chosen && prefer && (chosen->details != prefer->details)) { crm_info("Not pre-allocating %s to %s because %s is better", rsc->id, prefer->details->uname, chosen->details->uname); g_hash_table_destroy(rsc->allowed_nodes); rsc->allowed_nodes = backup; pcmk__unassign_resource(rsc); chosen = NULL; backup = NULL; } if (chosen) { pe_node_t *local_node = pcmk__top_allowed_node(rsc, chosen); if (local_node) { local_node->count++; } else if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { /* what to do? 
we can't enforce per-node limits in this case */ pcmk__config_err("%s not found in %s (list of %d)", chosen->details->id, rsc->parent->id, g_hash_table_size(rsc->parent->allowed_nodes)); } } if(backup) { g_hash_table_destroy(backup); } return chosen; } static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all) { GList *gIter = NULL; gIter = rsc->rsc_cons; for (; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; if (all || cons->score < 0 || cons->score == INFINITY) { child->rsc_cons = g_list_prepend(child->rsc_cons, cons); } } gIter = rsc->rsc_cons_lhs; for (; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data; if (!pcmk__colocation_has_influence(cons, child)) { continue; } if (all || cons->score < 0) { child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons); } } } void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes, int max, int per_host_max, pe_working_set_t * data_set); void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes, int max, int per_host_max, pe_working_set_t * data_set) { int loop_max = 0; int allocated = 0; int available_nodes = 0; bool all_coloc = false; /* count now tracks the number of clones currently allocated */ for(GList *nIter = nodes; nIter != NULL; nIter = nIter->next) { pe_node_t *node = nIter->data; node->count = 0; if (pcmk__node_available(node, false, false)) { available_nodes++; } } all_coloc = (max < available_nodes) ? true : false; if(available_nodes) { loop_max = max / available_nodes; } if (loop_max < 1) { loop_max = 1; } pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)", max, rsc->id, available_nodes, per_host_max, loop_max); /* Pre-allocate as many instances as we can to their current location */ for (GList *gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; pe_node_t *child_node = NULL; pe_node_t *local_node = NULL; if ((child->running_on == NULL) || !pcmk_is_set(child->flags, pe_rsc_provisional) || pcmk_is_set(child->flags, pe_rsc_failed)) { continue; } child_node = pe__current_node(child); local_node = pcmk__top_allowed_node(child, child_node); pe_rsc_trace(rsc, "Checking pre-allocation of %s to %s (%d remaining of %d)", child->id, child_node->details->uname, max - allocated, max); if (!pcmk__node_available(child_node, true, false)) { pe_rsc_trace(rsc, "Not pre-allocating because %s can not run %s", child_node->details->uname, child->id); continue; } if ((local_node != NULL) && (local_node->count >= loop_max)) { pe_rsc_trace(rsc, "Not pre-allocating because %s already allocated " "optimal instances", child_node->details->uname); continue; } if (allocate_instance(child, child_node, all_coloc, per_host_max, data_set)) { pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id, child_node->details->uname); allocated++; } } pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max); for (GList *gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; if (child->running_on != NULL) { pe_node_t *child_node = pe__current_node(child); pe_node_t *local_node = pcmk__top_allowed_node(child, child_node); if (local_node == NULL) { crm_err("%s is running on %s which isn't allowed", child->id, child_node->details->uname); } } if (!pcmk_is_set(child->flags, pe_rsc_provisional)) { } else if 
(allocated >= max) { pe_rsc_debug(rsc, "Child %s not allocated - limit reached %d %d", child->id, allocated, max); resource_location(child, NULL, -INFINITY, "clone:limit_reached", data_set); } else { if (allocate_instance(child, NULL, all_coloc, per_host_max, data_set)) { allocated++; } } } pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d", allocated, rsc->id, max); } pe_node_t * pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer, pe_working_set_t *data_set) { GList *nodes = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return NULL; } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) { pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id); return NULL; } if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__add_promotion_scores(rsc); } pe__set_resource_flags(rsc, pe_rsc_allocating); /* This information is used by pcmk__cmp_instance() when deciding the order * in which to assign clone instances to nodes. */ for (GList *gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; pe_rsc_trace(rsc, "%s: Allocating %s first", rsc->id, constraint->primary->id); constraint->primary->cmds->allocate(constraint->primary, prefer, data_set); } for (GList *gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data; if (pcmk__colocation_has_influence(constraint, NULL)) { pcmk__apply_colocation(constraint, rsc, constraint->dependent, pe_weights_rollback|pe_weights_positive); } } pe__show_node_weights(!pcmk_is_set(data_set->flags, pe_flag_show_scores), rsc, __func__, rsc->allowed_nodes, data_set); nodes = g_hash_table_get_values(rsc->allowed_nodes); nodes = pcmk__sort_nodes(nodes, NULL, data_set); rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance); distribute_children(rsc, rsc->children, nodes, clone_data->clone_max, clone_data->clone_node_max, data_set); g_list_free(nodes); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__set_instance_roles(rsc); } pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating); pe_rsc_trace(rsc, "Done allocating %s", rsc->id); return NULL; } static void clone_update_pseudo_status(pe_resource_t * rsc, gboolean * stopping, gboolean * starting, gboolean * active) { GList *gIter = NULL; if (rsc->children) { gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; clone_update_pseudo_status(child, stopping, starting, active); } return; } CRM_ASSERT(active != NULL); CRM_ASSERT(starting != NULL); CRM_ASSERT(stopping != NULL); if (rsc->running_on) { *active = TRUE; } gIter = rsc->actions; for (; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (*starting && *stopping) { return; } else if (pcmk_is_set(action->flags, pe_action_optional)) { pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid); continue; } else if (!pcmk_any_flags_set(action->flags, pe_action_pseudo|pe_action_runnable)) { pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid); continue; } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)) { pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid); *stopping = TRUE; } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)) { if (!pcmk_is_set(action->flags, pe_action_runnable)) { pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, 
pseudo=%d", action->uuid, pcmk_is_set(action->flags, pe_action_runnable), pcmk_is_set(action->flags, pe_action_pseudo)); } else { pe_rsc_trace(rsc, "Starting due to: %s", action->uuid); pe_rsc_trace(rsc, "%s run=%d, pseudo=%d", action->uuid, pcmk_is_set(action->flags, pe_action_runnable), pcmk_is_set(action->flags, pe_action_pseudo)); *starting = TRUE; } } } } static pe_action_t * find_rsc_action(pe_resource_t *rsc, const char *task) { pe_action_t *match = NULL; GList *actions = pe__resource_actions(rsc, NULL, task, FALSE); for (GList *item = actions; item != NULL; item = item->next) { pe_action_t *op = (pe_action_t *) item->data; if (!pcmk_is_set(op->flags, pe_action_optional)) { if (match != NULL) { // More than one match, don't return any match = NULL; break; } match = op; } } g_list_free(actions); return match; } static void child_ordering_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) { pe_action_t *stop = NULL; pe_action_t *start = NULL; pe_action_t *last_stop = NULL; pe_action_t *last_start = NULL; GList *gIter = NULL; if (!pe__clone_is_ordered(rsc)) { return; } /* we have to maintain a consistent sorted child list when building order constraints */ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number); for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; stop = find_rsc_action(child, RSC_STOP); if (stop) { if (last_stop) { /* child/child relative stop */ order_actions(stop, last_stop, pe_order_optional); } last_stop = stop; } start = find_rsc_action(child, RSC_START); if (start) { if (last_start) { /* child/child relative start */ order_actions(last_start, start, pe_order_optional); } last_start = start; } } } void clone_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set) { clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify, &clone_data->stop_notify,data_set); child_ordering_constraints(rsc, data_set); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { create_promotable_actions(rsc, data_set); } } void clone_create_pseudo_actions( pe_resource_t * rsc, GList *children, notify_data_t **start_notify, notify_data_t **stop_notify, pe_working_set_t * data_set) { gboolean child_active = FALSE; gboolean child_starting = FALSE; gboolean child_stopping = FALSE; gboolean allow_dependent_migrations = TRUE; pe_action_t *stop = NULL; pe_action_t *stopped = NULL; pe_action_t *start = NULL; pe_action_t *started = NULL; pe_rsc_trace(rsc, "Creating actions for %s", rsc->id); for (GList *gIter = children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; gboolean starting = FALSE; gboolean stopping = FALSE; child_rsc->cmds->create_actions(child_rsc, data_set); clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active); if (stopping && starting) { allow_dependent_migrations = FALSE; } child_stopping |= stopping; child_starting |= starting; } /* start */ - start = pcmk__new_rsc_pseudo_action(rsc, RSC_START, !child_starting, true); - started = pcmk__new_rsc_pseudo_action(rsc, RSC_STARTED, !child_starting, - false); + start = pe__new_rsc_pseudo_action(rsc, RSC_START, !child_starting, true); + started = pe__new_rsc_pseudo_action(rsc, RSC_STARTED, !child_starting, + false); started->priority = INFINITY; if (child_active || child_starting) { pe__set_action_flags(started, pe_action_runnable); } if (start_notify != NULL && *start_notify == 
NULL) { - *start_notify = pcmk__clone_notif_pseudo_ops(rsc, RSC_START, start, - started); + *start_notify = pe__clone_notif_pseudo_ops(rsc, RSC_START, start, + started); } /* stop */ - stop = pcmk__new_rsc_pseudo_action(rsc, RSC_STOP, !child_stopping, true); - stopped = pcmk__new_rsc_pseudo_action(rsc, RSC_STOPPED, !child_stopping, - true); + stop = pe__new_rsc_pseudo_action(rsc, RSC_STOP, !child_stopping, true); + stopped = pe__new_rsc_pseudo_action(rsc, RSC_STOPPED, !child_stopping, + true); stopped->priority = INFINITY; if (allow_dependent_migrations) { pe__set_action_flags(stop, pe_action_migrate_runnable); } if (stop_notify != NULL && *stop_notify == NULL) { - *stop_notify = pcmk__clone_notif_pseudo_ops(rsc, RSC_STOP, stop, - stopped); + *stop_notify = pe__clone_notif_pseudo_ops(rsc, RSC_STOP, stop, stopped); if (start_notify && *start_notify && *stop_notify) { order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional); } } } void clone_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set) { pe_resource_t *last_rsc = NULL; GList *gIter; bool ordered = pe__clone_is_ordered(rsc); pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id); pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set); pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_runnable_left, data_set); } if (ordered) { /* we have to maintain a consistent sorted child list when building order constraints */ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number); } for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->internal_constraints(child_rsc, data_set); pcmk__order_starts(rsc, child_rsc, pe_order_runnable_left|pe_order_implies_first_printed, data_set); pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED, pe_order_implies_then_printed, data_set); if (ordered && (last_rsc != NULL)) { pcmk__order_starts(last_rsc, child_rsc, pe_order_optional, data_set); } pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed, data_set); pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_implies_then_printed, data_set); if (ordered && (last_rsc != NULL)) { pcmk__order_stops(child_rsc, last_rsc, pe_order_optional, data_set); } last_rsc = child_rsc; } if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { promotable_constraints(rsc, data_set); } } gboolean is_child_compatible(pe_resource_t *child_rsc, pe_node_t * local_node, enum rsc_role_e filter, gboolean current) { pe_node_t *node = NULL; enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, current); CRM_CHECK(child_rsc && local_node, return FALSE); if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { /* We only want instances that haven't failed */ node = child_rsc->fns->location(child_rsc, NULL, current); } if (filter != RSC_ROLE_UNKNOWN && next_role != filter) { crm_trace("Filtered %s", child_rsc->id); return FALSE; } if (node && (node->details == local_node->details)) { return TRUE; } else if (node) { crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname, 
local_node->details->uname); } else { crm_trace("%s - not allocated %d", child_rsc->id, current); } return FALSE; } pe_resource_t * find_compatible_child(pe_resource_t *local_child, pe_resource_t *rsc, enum rsc_role_e filter, gboolean current, pe_working_set_t *data_set) { pe_resource_t *pair = NULL; GList *gIter = NULL; GList *scratch = NULL; pe_node_t *local_node = NULL; local_node = local_child->fns->location(local_child, NULL, current); if (local_node) { return find_compatible_child_by_node(local_child, local_node, rsc, filter, current); } scratch = g_hash_table_get_values(local_child->allowed_nodes); scratch = pcmk__sort_nodes(scratch, NULL, data_set); gIter = scratch; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; pair = find_compatible_child_by_node(local_child, node, rsc, filter, current); if (pair) { goto done; } } pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id); done: g_list_free(scratch); return pair; } void clone_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { /* -- Never called -- * * Instead we add the colocation constraints to the child and call from there */ CRM_ASSERT(FALSE); } void clone_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { GList *gIter = NULL; gboolean do_interleave = FALSE; const char *interleave_s = NULL; CRM_CHECK(constraint != NULL, return); CRM_CHECK(dependent != NULL, pe_err("dependent was NULL for %s", constraint->id); return); CRM_CHECK(primary != NULL, pe_err("primary was NULL for %s", constraint->id); return); CRM_CHECK(dependent->variant == pe_native, return); pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d", constraint->id, dependent->id, primary->id, constraint->score); if (pcmk_is_set(primary->flags, pe_rsc_promotable)) { if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { pe_rsc_trace(primary, "%s is still provisional", primary->id); return; } else if (constraint->primary_role == RSC_ROLE_UNKNOWN) { pe_rsc_trace(primary, "Handling %s as a clone colocation", constraint->id); } else { promotable_colocation_rh(dependent, primary, constraint, data_set); return; } } /* only the LHS side needs to be labeled as interleave */ interleave_s = g_hash_table_lookup(constraint->dependent->meta, XML_RSC_ATTR_INTERLEAVE); if (crm_is_true(interleave_s) && (constraint->dependent->variant > pe_group)) { /* @TODO Do we actually care about multiple primary copies sharing a * dependent copy anymore? 
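 *
 * Interleaving pairs each dependent instance with a primary instance on
 * the same node, so it is only attempted when both sides allow the same
 * number of copies per node; as a minimal sketch of the gate below:
 *
 *     do_interleave = (copies_per_node(dependent) == copies_per_node(primary));
 *
 * with a configuration error logged when the counts differ.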
*/ if (copies_per_node(constraint->dependent) != copies_per_node(constraint->primary)) { pcmk__config_err("Cannot interleave %s and %s because they do not " "support the same number of instances per node", constraint->dependent->id, constraint->primary->id); } else { do_interleave = TRUE; } } if (pcmk_is_set(primary->flags, pe_rsc_provisional)) { pe_rsc_trace(primary, "%s is still provisional", primary->id); return; } else if (do_interleave) { pe_resource_t *primary_instance = NULL; primary_instance = find_compatible_child(dependent, primary, RSC_ROLE_UNKNOWN, FALSE, data_set); if (primary_instance != NULL) { pe_rsc_debug(primary, "Pairing %s with %s", dependent->id, primary_instance->id); dependent->cmds->rsc_colocation_lh(dependent, primary_instance, constraint, data_set); } else if (constraint->score >= INFINITY) { crm_notice("Cannot pair %s with instance of %s", dependent->id, primary->id); pcmk__assign_resource(dependent, NULL, true); } else { pe_rsc_debug(primary, "Cannot pair %s with instance of %s", dependent->id, primary->id); } return; } else if (constraint->score >= INFINITY) { GList *affected_nodes = NULL; gIter = primary->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) { pe_rsc_trace(primary, "Allowing %s: %s %d", constraint->id, chosen->details->uname, chosen->weight); affected_nodes = g_list_prepend(affected_nodes, chosen); } } node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE); g_list_free(affected_nodes); return; } gIter = primary->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->rsc_colocation_rh(dependent, child_rsc, constraint, data_set); } } enum action_tasks clone_child_action(pe_action_t * action) { enum action_tasks result = no_action; pe_resource_t *child = (pe_resource_t *) action->rsc->children->data; if (pcmk__strcase_any_of(action->task, "notify", "notified", NULL)) { /* Find the action we're notifying about instead */ int stop = 0; char *key = action->uuid; int lpc = strlen(key); for (; lpc > 0; lpc--) { if (key[lpc] == '_' && stop == 0) { stop = lpc; } else if (key[lpc] == '_') { char *task_mutable = NULL; lpc++; task_mutable = strdup(key + lpc); task_mutable[stop - lpc] = 0; crm_trace("Extracted action '%s' from '%s'", task_mutable, key); result = get_complex_task(child, task_mutable, TRUE); free(task_mutable); break; } } } else { result = get_complex_task(child, action->task, TRUE); } return result; } #define pe__clear_action_summary_flags(flags, action, flag) do { \ flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "Action summary", action->rsc->id, \ flags, flag, #flag); \ } while (0) enum pe_action_flags summary_action_flags(pe_action_t * action, GList *children, pe_node_t * node) { GList *gIter = NULL; gboolean any_runnable = FALSE; gboolean check_runnable = TRUE; enum action_tasks task = clone_child_action(action); enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo); const char *task_s = task2text(task); for (gIter = children; gIter != NULL; gIter = gIter->next) { pe_action_t *child_action = NULL; pe_resource_t *child = (pe_resource_t *) gIter->data; child_action = find_first_action(child->actions, NULL, task_s, child->children ? 
NULL : node); pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id, node ? node->details->uname : "none", child_action?child_action->uuid:"NA"); if (child_action) { enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); if (pcmk_is_set(flags, pe_action_optional) && !pcmk_is_set(child_flags, pe_action_optional)) { pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid, child_action->uuid); pe__clear_action_summary_flags(flags, action, pe_action_optional); pe__clear_action_flags(action, pe_action_optional); } if (pcmk_is_set(child_flags, pe_action_runnable)) { any_runnable = TRUE; } } } if (check_runnable && any_runnable == FALSE) { pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid); pe__clear_action_summary_flags(flags, action, pe_action_runnable); if (node == NULL) { pe__clear_action_flags(action, pe_action_runnable); } } return flags; } enum pe_action_flags clone_action_flags(pe_action_t * action, pe_node_t * node) { return summary_action_flags(action, action->rsc->children, node); } void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint) { GList *gIter = rsc->children; pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id); pcmk__apply_location(constraint, rsc); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->rsc_location(child_rsc, constraint); } } void clone_expand(pe_resource_t * rsc, pe_working_set_t * data_set) { GList *gIter = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL); - pcmk__create_notifications(rsc, clone_data->start_notify); - pcmk__create_notifications(rsc, clone_data->stop_notify); - pcmk__create_notifications(rsc, clone_data->promote_notify); - pcmk__create_notifications(rsc, clone_data->demote_notify); + pe__create_notifications(rsc, clone_data->start_notify); + pe__create_notifications(rsc, clone_data->stop_notify); + pe__create_notifications(rsc, clone_data->promote_notify); + pe__create_notifications(rsc, clone_data->demote_notify); /* Now that the notifcations have been created we can expand the children */ gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->cmds->expand(child_rsc, data_set); } native_expand(rsc, data_set); /* The notifications are in the graph now, we can destroy the notify_data */ - pcmk__free_notification_data(clone_data->demote_notify); + pe__free_notification_data(clone_data->demote_notify); clone_data->demote_notify = NULL; - pcmk__free_notification_data(clone_data->stop_notify); + pe__free_notification_data(clone_data->stop_notify); clone_data->stop_notify = NULL; - pcmk__free_notification_data(clone_data->start_notify); + pe__free_notification_data(clone_data->start_notify); clone_data->start_notify = NULL; - pcmk__free_notification_data(clone_data->promote_notify); + pe__free_notification_data(clone_data->promote_notify); clone_data->promote_notify = NULL; } // Check whether a resource or any of its children is known on node static bool rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node) { if (rsc->children) { for (GList *child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) { pe_resource_t *child = (pe_resource_t *) child_iter->data; if (rsc_known_on(child, node)) { return TRUE; } } } else if 
(rsc->known_on) { GHashTableIter iter; pe_node_t *known_node = NULL; g_hash_table_iter_init(&iter, rsc->known_on); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) { if (node->details == known_node->details) { return TRUE; } } } return FALSE; } // Look for an instance of clone that is known on node static pe_resource_t * find_instance_on(const pe_resource_t *clone, const pe_node_t *node) { for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child = (pe_resource_t *) gIter->data; if (rsc_known_on(child, node)) { return child; } } return NULL; } // For unique clones, probe each instance separately static gboolean probe_unique_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set) { gboolean any_created = FALSE; for (GList *child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) { pe_resource_t *child = (pe_resource_t *) child_iter->data; any_created |= child->cmds->create_probe(child, node, complete, force, data_set); } return any_created; } // For anonymous clones, only a single instance needs to be probed static gboolean probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set) { // First, check if we probed an instance on this node last time pe_resource_t *child = find_instance_on(rsc, node); // Otherwise, check if we plan to start an instance on this node if (child == NULL) { for (GList *child_iter = rsc->children; child_iter && !child; child_iter = child_iter->next) { pe_node_t *local_node = NULL; pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data; if (child_rsc) { /* make clang analyzer happy */ local_node = child_rsc->fns->location(child_rsc, NULL, FALSE); if (local_node && (local_node->details == node->details)) { child = child_rsc; } } } } // Otherwise, use the first clone instance if (child == NULL) { child = rsc->children->data; } CRM_ASSERT(child); return child->cmds->create_probe(child, node, complete, force, data_set); } gboolean clone_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete, gboolean force, pe_working_set_t * data_set) { gboolean any_created = FALSE; CRM_ASSERT(rsc); rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number); if (rsc->children == NULL) { pe_warn("Clone %s has no children", rsc->id); return FALSE; } if (rsc->exclusive_discover) { pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) { /* exclusive discover is enabled and this node is not marked * as a node this resource should be discovered on * * remove the node from allowed_nodes so that the * notification contains only nodes that we might ever run * on */ g_hash_table_remove(rsc->allowed_nodes, node->details->id); /* Bit of a shortcut - might as well take it */ return FALSE; } } if (pcmk_is_set(rsc->flags, pe_rsc_unique)) { any_created = probe_unique_clone(rsc, node, complete, force, data_set); } else { any_created = probe_anonymous_clone(rsc, node, complete, force, data_set); } return any_created; } void clone_append_meta(pe_resource_t * rsc, xmlNode * xml) { char *name = NULL; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); name = crm_meta_name(XML_RSC_ATTR_UNIQUE); crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique)); free(name); name = crm_meta_name(XML_RSC_ATTR_NOTIFY); crm_xml_add(xml, name, 
pe__rsc_bool_str(rsc, pe_rsc_notify)); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX); crm_xml_add_int(xml, name, clone_data->clone_max); free(name); name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX); crm_xml_add_int(xml, name, clone_data->clone_node_max); free(name); if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) { int promoted_max = pe__clone_promoted_max(rsc); int promoted_node_max = pe__clone_promoted_node_max(rsc); name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX); crm_xml_add_int(xml, name, promoted_max); free(name); name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX); crm_xml_add_int(xml, name, promoted_node_max); free(name); /* @COMPAT Maintain backward compatibility with resource agents that * expect the old names (deprecated since 2.0.0). */ name = crm_meta_name(PCMK_XE_PROMOTED_MAX_LEGACY); crm_xml_add_int(xml, name, promoted_max); free(name); name = crm_meta_name(PCMK_XE_PROMOTED_NODE_MAX_LEGACY); crm_xml_add_int(xml, name, promoted_node_max); free(name); } } // Clone implementation of resource_alloc_functions_t:add_utilization() void pcmk__clone_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization) { bool existing = false; pe_resource_t *child = NULL; if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) { return; } // Look for any child already existing in the list for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { child = (pe_resource_t *) iter->data; if (g_list_find(all_rscs, child)) { existing = true; // Keep checking remaining children } else { // If this is a clone of a group, look for group's members for (GList *member_iter = child->children; member_iter != NULL; member_iter = member_iter->next) { pe_resource_t *member = (pe_resource_t *) member_iter->data; if (g_list_find(all_rscs, member) != NULL) { // Add *child's* utilization, not group member's child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization); existing = true; break; } } } } if (!existing && (rsc->children != NULL)) { // If nothing was found, still add first child's utilization child = (pe_resource_t *) rsc->children->data; child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization); } } // Clone implementation of resource_alloc_functions_t:shutdown_lock() void pcmk__clone_shutdown_lock(pe_resource_t *rsc) { return; // Clones currently don't support shutdown locks } diff --git a/lib/pacemaker/pcmk_sched_fencing.c b/lib/pacemaker/pcmk_sched_fencing.c index 19cba1bc7e..c2a3ff17c0 100644 --- a/lib/pacemaker/pcmk_sched_fencing.c +++ b/lib/pacemaker/pcmk_sched_fencing.c @@ -1,453 +1,453 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include "libpacemaker_private.h" /*! 
* \internal * \brief Check whether a resource is known on a particular node * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return TRUE if resource (or parent if an anonymous clone) is known */ static bool rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node) { if (pe_hash_table_lookup(rsc->known_on, node->details->id)) { return TRUE; } else if ((rsc->variant == pe_native) && pe_rsc_is_anon_clone(rsc->parent) && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) { /* We check only the parent, not the uber-parent, because we cannot * assume that the resource is known if it is in an anonymously cloned * group (which may be only partially known). */ return TRUE; } return FALSE; } /*! * \internal * \brief Order a resource's start and promote actions relative to fencing * * \param[in] rsc Resource to be ordered * \param[in] stonith_op Fence action * \param[in] data_set Cluster working set */ static void order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set) { pe_node_t *target; GList *gIter = NULL; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; switch (action->needs) { case rsc_req_nothing: // Anything other than start or promote requires nothing break; case rsc_req_stonith: order_actions(stonith_op, action, pe_order_optional); break; case rsc_req_quorum: if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id) && !rsc_is_known_on(rsc, target)) { /* If we don't know the status of the resource on the node * we're about to shoot, we have to assume it may be active * there. Order the resource start after the fencing. This * is analogous to waiting for all the probes for a resource * to complete before starting it. * * The most likely explanation is that the DC died and took * its status with it. */ pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid, target->details->uname); order_actions(stonith_op, action, pe_order_optional | pe_order_runnable_left); } break; } } } /*! * \internal * \brief Order a resource's stop and demote actions relative to fencing * * \param[in] rsc Resource to be ordered * \param[in] stonith_op Fence action * \param[in] data_set Cluster working set */ static void order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set) { GList *gIter = NULL; GList *action_list = NULL; bool order_implicit = false; pe_resource_t *top = uber_parent(rsc); pe_action_t *parent_stop = NULL; pe_node_t *target; CRM_CHECK(stonith_op && stonith_op->node, return); target = stonith_op->node; /* Get a list of stop actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE); /* If resource requires fencing, implicit actions must occur after fencing. * * Implied stops and demotes of resources running on guest nodes are always * ordered after fencing, even if the resource does not require fencing, * because guest node "fencing" is actually just a resource stop. 
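 *
 * Summarized, the decision made just below is:
 *
 *     rsc needs fencing? | target is guest node? | order_implicit
 *     -------------------+-----------------------+---------------
 *            yes         |          any          |      true
 *            no          |          yes          |      true
 *            no          |          no           |      false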
*/ if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing) || pe__is_guest_node(target)) { order_implicit = true; } if (action_list && order_implicit) { parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); } for (gIter = action_list; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; // The stop would never complete, so convert it into a pseudo-action. pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable); if (order_implicit) { pe__set_action_flags(action, pe_action_implied_by_stonith); /* Order the stonith before the parent stop (if any). * * Also order the stonith before the resource stop, unless the * resource is inside a bundle -- that would cause a graph loop. * We can rely on the parent stop's ordering instead. * * User constraints must not order a resource in a guest node * relative to the guest node container resource. The * pe_order_preserve flag marks constraints as generated by the * cluster and thus immune to that check (and is irrelevant if * target is not a guest). */ if (!pe_rsc_is_bundled(rsc)) { order_actions(stonith_op, action, pe_order_preserve); } order_actions(stonith_op, parent_stop, pe_order_preserve); } if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { crm_notice("Stop of failed resource %s is implicit %s %s is fenced", rsc->id, (order_implicit? "after" : "because"), target->details->uname); } else { crm_info("%s is implicit %s %s is fenced", action->uuid, (order_implicit? "after" : "because"), target->details->uname); } if (pcmk_is_set(rsc->flags, pe_rsc_notify)) { - pcmk__order_notifs_after_fencing(action, rsc, stonith_op); + pe__order_notifs_after_fencing(action, rsc, stonith_op); } #if 0 /* It might be a good idea to stop healthy resources on a node about to * be fenced, when possible. * * However, fencing must be done before a failed resource's * (pseudo-)stop action, so that could create a loop. For example, given * a group of A and B running on node N with a failed stop of B: * * fence N -> stop B (pseudo-op) -> stop A -> fence N * * The block below creates the stop A -> fence N ordering and therefore * must (at least for now) be disabled. Instead, run the block above and * treat all resources on N as B would be (i.e., as a pseudo-op after * the fencing). * * @TODO Maybe break the "A requires B" dependency in * pcmk__update_action_for_orderings() and use this block for healthy * resources instead of the above. */ crm_info("Moving healthy resource %s off %s before fencing", rsc->id, node->details->uname); pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, strdup(CRM_OP_FENCE), stonith_op, pe_order_optional, data_set); #endif } g_list_free(action_list); /* Get a list of demote actions potentially implied by the fencing */ action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE); for (gIter = action_list; gIter != NULL; gIter = gIter->next) { pe_action_t *action = (pe_action_t *) gIter->data; if (!(action->node->details->online) || action->node->details->unclean || pcmk_is_set(rsc->flags, pe_rsc_failed)) { if (pcmk_is_set(rsc->flags, pe_rsc_failed)) { pe_rsc_info(rsc, "Demote of failed resource %s is implicit after %s is fenced", rsc->id, target->details->uname); } else { pe_rsc_info(rsc, "%s is implicit after %s is fenced", action->uuid, target->details->uname); } /* The demote would never complete and is now implied by the * fencing, so convert it into a pseudo-action. 
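 *
 * A pseudo-action stays in the transition graph for ordering purposes but
 * is never dispatched to any node; marking it runnable lets the actions
 * ordered after it proceed. When implicit ordering applies (and the
 * resource is not in a bundle), the net result for a fenced node running a
 * promoted resource R is roughly:
 *
 *     fence node -> demote R (pseudo-op) -> stop R (pseudo-op)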
*/ pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable); if (pe_rsc_is_bundled(rsc)) { // Do nothing, let recovery be ordered after parent's implied stop } else if (order_implicit) { order_actions(stonith_op, action, pe_order_preserve|pe_order_optional); } } } g_list_free(action_list); } /*! * \internal * \brief Order resource actions properly relative to fencing * * \param[in] rsc Resource whose actions should be ordered * \param[in] stonith_op Fencing operation to be ordered against * \param[in] data_set Cluster working set */ static void rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set) { if (rsc->children) { GList *gIter = NULL; for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; rsc_stonith_ordering(child_rsc, stonith_op, data_set); } } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id); } else { order_start_vs_fencing(rsc, stonith_op, data_set); order_stop_vs_fencing(rsc, stonith_op, data_set); } } /*! * \internal * \brief Order all actions appropriately relative to a fencing operation * * Ensure start operations of affected resources are ordered after fencing, * imply stop and demote operations of affected resources by marking them as * pseudo-actions, etc. * * \param[in] stonith_op Fencing operation * \param[in,out] data_set Working set of cluster */ void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set) { CRM_CHECK(stonith_op && data_set, return); for (GList *r = data_set->resources; r != NULL; r = r->next) { rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op, data_set); } } /*! * \internal * \brief Order an action after unfencing * * \param[in] rsc Resource that action is for * \param[in] node Node that action is on * \param[in] action Action to be ordered after unfencing * \param[in] order Ordering flags * \param[in] data_set Cluster working set */ void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, enum pe_ordering order, pe_working_set_t *data_set) { /* When unfencing is in use, we order unfence actions before any probe or * start of resources that require unfencing, and also of fence devices. * * This might seem to violate the principle that fence devices require * only quorum. However, fence agents that unfence often don't have enough * information to even probe or start unless the node is first unfenced. */ if ((pcmk_is_set(rsc->flags, pe_rsc_fence_device) && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) { /* Start with an optional ordering. Requiring unfencing would result in * the node being unfenced, and all its resources being stopped, * whenever a new resource is added -- which would be highly suboptimal. */ pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set); order_actions(unfence, action, order); if (!pcmk__node_unfenced(node)) { // But unfencing is required if it has never been done char *reason = crm_strdup_printf("required by %s %s", rsc->id, action->task); trigger_unfencing(NULL, node, reason, NULL, data_set); free(reason); } } } /*! 
* \internal * \brief Create pseudo-op for guest node fence, and order relative to it * * \param[in] node Guest node to fence */ void pcmk__fence_guest(pe_node_t *node) { pe_resource_t *container = NULL; pe_action_t *stop = NULL; pe_action_t *stonith_op = NULL; /* The fence action is just a label; we don't do anything differently for * off vs. reboot. We specify it explicitly, rather than let it default to * cluster's default action, because we are not _initiating_ fencing -- we * are creating a pseudo-event to describe fencing that is already occurring * by other means (container recovery). */ const char *fence_action = "off"; CRM_ASSERT(node != NULL); /* Check whether guest's container resource has any explicit stop or * start (the stop may be implied by fencing of the guest's host). */ container = node->details->remote_rsc->container; if (container) { stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL); if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) { fence_action = "reboot"; } } /* Create a fence pseudo-event, so we have an event to order actions * against, and the controller can always detect it. */ stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", FALSE, node->details->data_set); pe__set_action_flags(stonith_op, pe_action_pseudo|pe_action_runnable); /* We want to imply stops/demotes after the guest is stopped, not wait until * it is restarted, so we always order pseudo-fencing after stop, not start * (even though start might be closer to what is done for a real reboot). */ if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) { pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, FALSE, node->details->data_set); crm_info("Implying guest node %s is down (action %d) after %s fencing", node->details->uname, stonith_op->id, stop->node->details->uname); order_actions(parent_stonith_op, stonith_op, pe_order_runnable_left|pe_order_implies_then); } else if (stop) { order_actions(stop, stonith_op, pe_order_runnable_left|pe_order_implies_then); crm_info("Implying guest node %s is down (action %d) " "after container %s is stopped (action %d)", node->details->uname, stonith_op->id, container->id, stop->id); } else { /* If we're fencing the guest node but there's no stop for the guest * resource, we must think the guest is already stopped. However, we may * think so because its resource history was just cleaned. To avoid * unnecessarily considering the guest node down if it's really up, * order the pseudo-fencing after any stop of the connection resource, * which will be ordered after any container (re-)probe. */ stop = find_first_action(node->details->remote_rsc->actions, NULL, RSC_STOP, NULL); if (stop) { order_actions(stop, stonith_op, pe_order_optional); crm_info("Implying guest node %s is down (action %d) " "after connection is stopped (action %d)", node->details->uname, stonith_op->id, stop->id); } else { /* Not sure why we're fencing, but everything must already be * cleanly stopped. */ crm_info("Implying guest node %s is down (action %d) ", node->details->uname, stonith_op->id); } } // Order/imply other actions relative to pseudo-fence as with real fence pcmk__order_vs_fence(stonith_op, node->details->data_set); } /*! 
* \internal * \brief Check whether node has already been unfenced * * \param[in] node Node to check * * \return true if node has a nonzero #node-unfenced attribute (or none), * otherwise false */ bool pcmk__node_unfenced(pe_node_t *node) { const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED); return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches); } diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c index 86746dc76e..460a186725 100644 --- a/lib/pacemaker/pcmk_sched_promotable.c +++ b/lib/pacemaker/pcmk_sched_promotable.c @@ -1,1252 +1,1250 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include "libpacemaker_private.h" #define VARIANT_CLONE 1 #include /*! * \internal * \brief Add implicit promotion ordering for a promotable instance * * \param[in] clone Clone resource * \param[in] child Instance of \p clone being ordered * \param[in] last Previous instance ordered (NULL if \p child is first) */ static void order_instance_promotion(pe_resource_t *clone, pe_resource_t *child, pe_resource_t *last) { // "Promote clone" -> promote instance -> "clone promoted" pcmk__order_resource_actions(clone, RSC_PROMOTE, child, RSC_PROMOTE, pe_order_optional, clone->cluster); pcmk__order_resource_actions(child, RSC_PROMOTE, clone, RSC_PROMOTED, pe_order_optional, clone->cluster); // If clone is ordered, order this instance relative to last if ((last != NULL) && pe__clone_is_ordered(clone)) { pcmk__order_resource_actions(last, RSC_PROMOTE, child, RSC_PROMOTE, pe_order_optional, clone->cluster); } } /*! * \internal * \brief Add implicit demotion ordering for a promotable instance * * \param[in] clone Clone resource * \param[in] child Instance of \p clone being ordered * \param[in] last Previous instance ordered (NULL if \p child is first) */ static void order_instance_demotion(pe_resource_t *clone, pe_resource_t *child, pe_resource_t *last) { // "Demote clone" -> demote instance -> "clone demoted" pcmk__order_resource_actions(clone, RSC_DEMOTE, child, RSC_DEMOTE, pe_order_implies_first_printed, clone->cluster); pcmk__order_resource_actions(child, RSC_DEMOTE, clone, RSC_DEMOTED, pe_order_implies_then_printed, clone->cluster); // If clone is ordered, order this instance relative to last if ((last != NULL) && pe__clone_is_ordered(clone)) { pcmk__order_resource_actions(child, RSC_DEMOTE, last, RSC_DEMOTE, pe_order_optional, clone->cluster); } } /*! 
* \internal * \brief Check whether an instance will be promoted or demoted * * \param[in] rsc Instance to check * \param[in] demoting If \p rsc will be demoted, this will be set to true * \param[in] promoting If \p rsc will be promoted, this will be set to true */ static void check_for_role_change(pe_resource_t *rsc, bool *demoting, bool *promoting) { GList *iter = NULL; // If this is a cloned group, check group members recursively if (rsc->children != NULL) { for (iter = rsc->children; iter != NULL; iter = iter->next) { check_for_role_change((pe_resource_t *) iter->data, demoting, promoting); } return; } for (iter = rsc->actions; iter != NULL; iter = iter->next) { pe_action_t *action = (pe_action_t *) iter->data; if (*promoting && *demoting) { return; } else if (pcmk_is_set(action->flags, pe_action_optional)) { continue; } else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_none)) { *demoting = true; } else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_none)) { *promoting = true; } } } /*! * \internal * \brief Add promoted-role location constraint scores to an instance's priority * * Adjust a promotable clone instance's promotion priority by the scores of any * location constraints in a list that are both limited to the promoted role and * for the node where the instance will be placed. * * \param[in] child Promotable clone instance * \param[in] location_constraints List of location constraints to apply * \param[in] chosen Node where \p child will be placed */ static void apply_promoted_locations(pe_resource_t *child, GList *location_constraints, pe_node_t *chosen) { for (GList *iter = location_constraints; iter; iter = iter->next) { pe__location_t *location = iter->data; pe_node_t *weighted_node = NULL; if (location->role_filter == RSC_ROLE_PROMOTED) { weighted_node = pe_find_node_id(location->node_list_rh, chosen->details->id); } if (weighted_node != NULL) { int new_priority = pcmk__add_scores(child->priority, weighted_node->weight); pe_rsc_trace(child, "Applying location %s to %s promotion priority on %s: " "%d + %d = %d", location->id, child->id, weighted_node->details->uname, child->priority, weighted_node->weight, new_priority); child->priority = new_priority; } } } /*! 
* \internal * \brief Get the node that an instance will be promoted on * * \param[in] rsc Promotable clone instance to check * * \return Node that \p rsc will be promoted on, or NULL if none */ static pe_node_t * node_to_be_promoted_on(pe_resource_t *rsc) { pe_node_t *node = NULL; pe_node_t *local_node = NULL; pe_resource_t *parent = uber_parent(rsc); // If this is a cloned group, bail if any group member can't be promoted for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; if (node_to_be_promoted_on(child) == NULL) { pe_rsc_trace(rsc, "%s can't be promoted because member %s can't", rsc->id, child->id); return NULL; } } node = rsc->fns->location(rsc, NULL, FALSE); if (node == NULL) { pe_rsc_trace(rsc, "%s can't be promoted because it won't be active", rsc->id); return NULL; } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) { if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED) { crm_notice("Unmanaged instance %s will be left promoted on %s", rsc->id, node->details->uname); } else { pe_rsc_trace(rsc, "%s can't be promoted because it is unmanaged", rsc->id); return NULL; } } else if (rsc->priority < 0) { pe_rsc_trace(rsc, "%s can't be promoted because its promotion priority %d " "is negative", rsc->id, rsc->priority); return NULL; } else if (!pcmk__node_available(node, false, true)) { pe_rsc_trace(rsc, "%s can't be promoted because %s can't run resources", rsc->id, node->details->uname); return NULL; } local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id); if (local_node == NULL) { /* It should not be possible for the scheduler to have allocated the * instance to a node where its parent is not allowed, but it's good to * have a fail-safe. */ if (pcmk_is_set(rsc->flags, pe_rsc_managed)) { crm_warn("%s can't be promoted because %s is not allowed on %s " "(scheduler bug?)", rsc->id, parent->id, node->details->uname); } // else the instance is unmanaged and already promoted return NULL; } else if ((local_node->count >= pe__clone_promoted_node_max(parent)) && pcmk_is_set(rsc->flags, pe_rsc_managed)) { pe_rsc_trace(rsc, "%s can't be promoted because %s has " "maximum promoted instances already", rsc->id, node->details->uname); return NULL; } return local_node; } /*! 
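node_to_be_promoted_on() is essentially a chain of guard clauses: the instance must be active, managed (unmanaged instances are only reported if already promoted), have a non-negative promotion priority, and sit on a node that can run resources and is still under promoted-node-max. A compressed standalone sketch of the same guard-clause shape follows; the mock_* structs are illustrative (they simplify the unmanaged case) and are not the real pe_resource_t or pe_node_t.

#include <stdbool.h>
#include <stdio.h>

// Illustrative stand-ins for the scheduler objects (not the real pe_* structs)
struct mock_node {
    const char *name;
    bool can_run_resources;
    int promoted_count;      // instances already chosen for promotion here
};

struct mock_instance {
    const char *id;
    bool active;
    bool managed;
    int priority;            // promotion priority
    struct mock_node *node;  // where the instance will be active
};

static struct mock_node *
node_to_be_promoted_on(struct mock_instance *rsc, int promoted_node_max)
{
    if (!rsc->active) {
        printf("%s: not eligible (won't be active)\n", rsc->id);
        return NULL;
    }
    if (!rsc->managed) {
        printf("%s: not eligible (unmanaged)\n", rsc->id);
        return NULL;
    }
    if (rsc->priority < 0) {
        printf("%s: not eligible (negative promotion priority)\n", rsc->id);
        return NULL;
    }
    if (!rsc->node->can_run_resources) {
        printf("%s: not eligible (%s can't run resources)\n", rsc->id, rsc->node->name);
        return NULL;
    }
    if (rsc->node->promoted_count >= promoted_node_max) {
        printf("%s: not eligible (%s at promoted-node-max)\n", rsc->id, rsc->node->name);
        return NULL;
    }
    return rsc->node;
}

int
main(void)
{
    struct mock_node n1 = { "node1", true, 0 };
    struct mock_instance inst = { "rsc:0", true, true, 10, &n1 };

    if (node_to_be_promoted_on(&inst, 1) != NULL) {
        printf("%s may be promoted on %s\n", inst.id, n1.name);
    }
    return 0;
}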
* \internal * \brief Compare two promotable clone instances by promotion priority * * \param[in] a First instance to compare * \param[in] b Second instance to compare * * \return A negative number if \p a has higher promotion priority, * a positive number if \p b has higher promotion priority, * or 0 if promotion priorities are equal */ static gint cmp_promotable_instance(gconstpointer a, gconstpointer b) { const pe_resource_t *rsc1 = (const pe_resource_t *) a; const pe_resource_t *rsc2 = (const pe_resource_t *) b; enum rsc_role_e role1 = RSC_ROLE_UNKNOWN; enum rsc_role_e role2 = RSC_ROLE_UNKNOWN; CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL)); // Check sort index set by pcmk__set_instance_roles() if (rsc1->sort_index > rsc2->sort_index) { pe_rsc_trace(rsc1, "%s has higher promotion priority than %s " "(sort index %d > %d)", rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index); return -1; } else if (rsc1->sort_index < rsc2->sort_index) { pe_rsc_trace(rsc1, "%s has lower promotion priority than %s " "(sort index %d < %d)", rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index); return 1; } // If those are the same, prefer instance whose current role is higher role1 = rsc1->fns->state(rsc1, TRUE); role2 = rsc2->fns->state(rsc2, TRUE); if (role1 > role2) { pe_rsc_trace(rsc1, "%s has higher promotion priority than %s " "(higher current role)", rsc1->id, rsc2->id); return -1; } else if (role1 < role2) { pe_rsc_trace(rsc1, "%s has lower promotion priority than %s " "(lower current role)", rsc1->id, rsc2->id); return 1; } // Finally, do normal clone instance sorting return pcmk__cmp_instance(a, b); } /*! * \internal * \brief Add a promotable clone instance's sort index to its node's weight * * Add a promotable clone instance's sort index (which sums its promotion * preferences and scores of relevant location constraints for the promoted * role) to the node weight of the instance's allocated node. * * \param[in] data Promotable clone instance * \param[in] user_data Clone parent of \p data */ static void add_sort_index_to_node_weight(gpointer data, gpointer user_data) { pe_resource_t *child = (pe_resource_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; pe_node_t *node = NULL; pe_node_t *chosen = NULL; if (child->sort_index < 0) { pe_rsc_trace(clone, "Not adding sort index of %s: negative", child->id); return; } chosen = child->fns->location(child, NULL, FALSE); if (chosen == NULL) { pe_rsc_trace(clone, "Not adding sort index of %s: inactive", child->id); return; } node = (pe_node_t *) pe_hash_table_lookup(clone->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); pe_rsc_trace(clone, "Adding sort index %s of %s to weight for %s", pcmk_readable_score(child->sort_index), child->id, node->details->uname); node->weight = pcmk__add_scores(child->sort_index, node->weight); } /*! 
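cmp_promotable_instance() is a tiered comparator: sort index first, then current role, then the generic clone-instance ordering. The same shape works with plain qsort(); in this standalone sketch the final pcmk__cmp_instance() tie-break is replaced by a simple ID comparison for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mock_instance {
    const char *id;
    int sort_index;   // higher is preferred for promotion
    int current_role; // higher value = more senior role, as with enum rsc_role_e
};

static int
cmp_promotable(const void *a, const void *b)
{
    const struct mock_instance *r1 = a;
    const struct mock_instance *r2 = b;

    // Tier 1: higher sort index sorts first
    if (r1->sort_index != r2->sort_index) {
        return (r1->sort_index > r2->sort_index)? -1 : 1;
    }
    // Tier 2: higher current role sorts first
    if (r1->current_role != r2->current_role) {
        return (r1->current_role > r2->current_role)? -1 : 1;
    }
    // Tier 3: stand-in for pcmk__cmp_instance(): fall back to the ID
    return strcmp(r1->id, r2->id);
}

int
main(void)
{
    struct mock_instance instances[] = {
        { "rsc:0", 5, 1 },
        { "rsc:1", 5, 2 },   // same sort index, higher role: wins the tie
        { "rsc:2", 10, 0 },  // highest sort index: sorts first overall
    };

    qsort(instances, 3, sizeof(instances[0]), cmp_promotable);
    for (int i = 0; i < 3; i++) {
        printf("%d. %s\n", i + 1, instances[i].id);
    }
    return 0;
}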
* \internal * \brief Apply colocation to dependent's node weights if for promoted role * * \param[in] data Colocation constraint to apply * \param[in] user_data Promotable clone that is constraint's dependent */ static void apply_coloc_to_dependent(gpointer data, gpointer user_data) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; enum pe_weights flags = 0; if (constraint->dependent_role != RSC_ROLE_PROMOTED) { return; } if (constraint->score < INFINITY) { flags = pe_weights_rollback; } pe_rsc_trace(clone, "RHS: %s with %s: %d", constraint->dependent->id, constraint->primary->id, constraint->score); pcmk__apply_colocation(constraint, clone, constraint->primary, flags); } /*! * \internal * \brief Apply colocation to primary's node weights if for promoted role * * \param[in] data Colocation constraint to apply * \param[in] user_data Promotable clone that is constraint's primary */ static void apply_coloc_to_primary(gpointer data, gpointer user_data) { pcmk__colocation_t *constraint = (pcmk__colocation_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; if ((constraint->primary_role != RSC_ROLE_PROMOTED) || !pcmk__colocation_has_influence(constraint, NULL)) { return; } pe_rsc_trace(clone, "LHS: %s with %s: %d", constraint->dependent->id, constraint->primary->id, constraint->score); pcmk__apply_colocation(constraint, clone, constraint->dependent, pe_weights_rollback|pe_weights_positive); } /*! * \internal * \brief Set clone instance's sort index to its node's weight * * \param[in] data Promotable clone instance * \param[in] user_data Parent clone of \p data */ static void set_sort_index_to_node_weight(gpointer data, gpointer user_data) { pe_resource_t *child = (pe_resource_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; pe_node_t *chosen = child->fns->location(child, NULL, FALSE); if (!pcmk_is_set(child->flags, pe_rsc_managed) && (child->next_role == RSC_ROLE_PROMOTED)) { child->sort_index = INFINITY; pe_rsc_trace(clone, "Final sort index for %s is INFINITY (unmanaged promoted)", child->id); } else if ((chosen == NULL) || (child->sort_index < 0)) { pe_rsc_trace(clone, "Final sort index for %s is %d (ignoring node weight)", child->id, child->sort_index); } else { pe_node_t *node = NULL; node = (pe_node_t *) pe_hash_table_lookup(clone->allowed_nodes, chosen->details->id); CRM_ASSERT(node != NULL); child->sort_index = node->weight; pe_rsc_trace(clone, "Merging weights for %s: final sort index for %s is %d", clone->id, child->id, child->sort_index); } } /*! 
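set_sort_index_to_node_weight() copies the merged node weight back into each instance's sort index, with two exceptions: an unmanaged instance that should stay promoted is pinned to INFINITY, and inactive or negatively indexed instances keep their current value. A standalone sketch of that decision, assuming 1,000,000 as the INFINITY score and using illustrative mock fields:

#include <stdbool.h>
#include <stdio.h>

#define SCORE_INFINITY 1000000  // assumed INFINITY score value

struct mock_instance {
    const char *id;
    bool managed;
    bool next_role_promoted;  // instance is intended to stay promoted
    bool active;              // instance has an assigned node
    int sort_index;           // promotion preference so far
    int node_weight;          // weight of the assigned node after merging
};

static void
finalize_sort_index(struct mock_instance *inst)
{
    if (!inst->managed && inst->next_role_promoted) {
        inst->sort_index = SCORE_INFINITY;      // keep unmanaged promoted instance on top
    } else if (!inst->active || (inst->sort_index < 0)) {
        /* leave sort_index as-is: node weight is ignored */
    } else {
        inst->sort_index = inst->node_weight;   // adopt the merged node weight
    }
    printf("%s: final sort index %d\n", inst->id, inst->sort_index);
}

int
main(void)
{
    struct mock_instance a = { "rsc:0", true,  false, true,  5,  42 };
    struct mock_instance b = { "rsc:1", false, true,  true,  5,  7  };
    struct mock_instance c = { "rsc:2", true,  false, false, -1, 99 };

    finalize_sort_index(&a);  // adopts node weight 42
    finalize_sort_index(&b);  // pinned to INFINITY (unmanaged, promoted)
    finalize_sort_index(&c);  // stays -1 (inactive/negative)
    return 0;
}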
* \internal * \brief Sort a promotable clone's instances by descending promotion priority * * \param[in] clone Promotable clone to sort */ static void sort_promotable_instances(pe_resource_t *clone) { if (pe__set_clone_flag(clone, pe__clone_promotion_constrained) == pcmk_rc_already) { return; } pe__set_resource_flags(clone, pe_rsc_merging); for (GList *iter = clone->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; pe_rsc_trace(clone, "Merging weights for %s: initial sort index for %s is %d", clone->id, child->id, child->sort_index); } pe__show_node_weights(true, clone, "Before", clone->allowed_nodes, clone->cluster); g_list_foreach(clone->children, add_sort_index_to_node_weight, clone); g_list_foreach(clone->rsc_cons, apply_coloc_to_dependent, clone); g_list_foreach(clone->rsc_cons_lhs, apply_coloc_to_primary, clone); // Ban resource from all nodes if it needs a ticket but doesn't have it pcmk__require_promotion_tickets(clone); pe__show_node_weights(true, clone, "After", clone->allowed_nodes, clone->cluster); // Reset sort indexes to final node weights g_list_foreach(clone->children, set_sort_index_to_node_weight, clone); // Finally, sort instances in descending order of promotion priority clone->children = g_list_sort(clone->children, cmp_promotable_instance); pe__clear_resource_flags(clone, pe_rsc_merging); } /*! * \internal * \brief Find the active instance (if any) of an anonymous clone on a node * * \param[in] clone Anonymous clone to check * \param[in] id Instance ID (without instance number) to check * \param[in] node Node to check * * \return Active instance of \p clone matching \p id on \p node, if any, otherwise NULL */ static pe_resource_t * find_active_anon_instance(pe_resource_t *clone, const char *id, const pe_node_t *node) { for (GList *iter = clone->children; iter; iter = iter->next) { pe_resource_t *child = iter->data; pe_resource_t *active = NULL; // Use ->find_rsc() in case this is a cloned group active = clone->fns->find_rsc(child, id, node, pe_find_clone|pe_find_current); if (active != NULL) { return active; } } return NULL; } /*! * \internal * \brief Check whether an anonymous clone instance is known on a node * * \param[in] clone Anonymous clone to check * \param[in] id Instance ID (without instance number) to check * \param[in] node Node to check * * \return true if \p id instance of \p clone is known on \p node, * otherwise false */ static bool anonymous_known_on(const pe_resource_t *clone, const char *id, const pe_node_t *node) { for (GList *iter = clone->children; iter; iter = iter->next) { pe_resource_t *child = iter->data; /* Use ->find_rsc() because this might be a cloned group, and knowing * that other members of the group are known here implies nothing. */ child = clone->fns->find_rsc(child, id, NULL, pe_find_clone); CRM_LOG_ASSERT(child != NULL); if (child != NULL) { if (g_hash_table_lookup(child->known_on, node->details->id)) { return true; } } } return false; } /*! * \internal * \brief Check whether a node is allowed to run a resource * * \param[in] rsc Resource to check * \param[in] node Node to check * * \return true if \p node is allowed to run \p rsc, otherwise false */ static bool is_allowed(const pe_resource_t *rsc, const pe_node_t *node) { pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); return (allowed != NULL) && (allowed->weight >= 0); } /*!
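sort_promotable_instances() runs its weight-merging pipeline at most once per clone: pe__set_clone_flag() reports pcmk_rc_already when pe__clone_promotion_constrained was already set, and the function returns immediately. The same guard pattern in standalone form (the flag value and return codes below are illustrative, not the real enums):

#include <stdint.h>
#include <stdio.h>

#define FLAG_PROMOTION_CONSTRAINED (UINT32_C(1) << 2)  // illustrative bit

enum { RC_OK = 0, RC_ALREADY = 1 };  // stand-ins for pcmk_rc_ok / pcmk_rc_already

// Set a flag bit, reporting whether it was already set (sketch of the guard)
static int
set_flag(uint32_t *flags, uint32_t flag)
{
    if (*flags & flag) {
        return RC_ALREADY;
    }
    *flags |= flag;
    return RC_OK;
}

static void
sort_instances(uint32_t *clone_flags)
{
    if (set_flag(clone_flags, FLAG_PROMOTION_CONSTRAINED) == RC_ALREADY) {
        printf("already sorted; skipping\n");
        return;
    }
    printf("merging weights, applying colocations, sorting instances\n");
}

int
main(void)
{
    uint32_t clone_flags = 0;

    sort_instances(&clone_flags);  // does the work
    sort_instances(&clone_flags);  // second call is a no-op
    return 0;
}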
* \brief Check whether a clone instance's promotion score should be considered * * \param[in] rsc Promotable clone instance to check * \param[in] node Node where score would be applied * * \return true if \p rsc's promotion score should be considered on \p node, * otherwise false */ static bool promotion_score_applies(pe_resource_t *rsc, const pe_node_t *node) { char *id = clone_strip(rsc->id); pe_resource_t *parent = uber_parent(rsc); pe_resource_t *active = NULL; const char *reason = "allowed"; // Some checks apply only to anonymous clone instances if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { // If instance is active on the node, its score definitely applies active = find_active_anon_instance(parent, id, node); if (active == rsc) { reason = "active"; goto check_allowed; } /* If *no* instance is active on this node, this instance's score will * count if it has been probed on this node. */ if ((active == NULL) && anonymous_known_on(parent, id, node)) { reason = "probed"; goto check_allowed; } } /* If this clone's status is unknown on *all* nodes (e.g. cluster startup), * take all instances' scores into account, to make sure we use any * permanent promotion scores. */ if ((rsc->running_on == NULL) && (g_hash_table_size(rsc->known_on) == 0)) { reason = "none probed"; goto check_allowed; } /* Otherwise, we've probed and/or started the resource *somewhere*, so * consider promotion scores on nodes where we know the status. */ if ((pe_hash_table_lookup(rsc->known_on, node->details->id) != NULL) || (pe_find_node_id(rsc->running_on, node->details->id) != NULL)) { reason = "known"; } else { pe_rsc_trace(rsc, "Ignoring %s promotion score (for %s) on %s: not probed", rsc->id, id, node->details->uname); free(id); return false; } check_allowed: if (is_allowed(rsc, node)) { pe_rsc_trace(rsc, "Counting %s promotion score (for %s) on %s: %s", rsc->id, id, node->details->uname, reason); free(id); return true; } pe_rsc_trace(rsc, "Ignoring %s promotion score (for %s) on %s: not allowed", rsc->id, id, node->details->uname); free(id); return false; } /*! * \internal * \brief Get the value of a promotion score node attribute * * \param[in] rsc Promotable clone instance to get promotion score for * \param[in] node Node to get promotion score for * \param[in] name Resource name to use in promotion score attribute name * * \return Value of promotion score node attribute for \p rsc on \p node */ static const char * promotion_attr_value(pe_resource_t *rsc, const pe_node_t *node, const char *name) { char *attr_name = NULL; const char *attr_value = NULL; CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL); attr_name = pcmk_promotion_score_name(name); attr_value = pe_node_attribute_calculated(node, attr_name, rsc); free(attr_name); return attr_value; } /*! * \internal * \brief Get the promotion score for a clone instance on a node * * \param[in] rsc Promotable clone instance to get score for * \param[in] node Node to get score for * \param[out] is_default If non-NULL, will be set true if no score available * * \return Promotion score for \p rsc on \p node (or 0 if none) */ static int promotion_score(pe_resource_t *rsc, const pe_node_t *node, bool *is_default) { char *name = NULL; const char *attr_value = NULL; if (is_default != NULL) { *is_default = true; } CRM_CHECK((rsc != NULL) && (node != NULL), return 0); /* If this is an instance of a cloned group, the promotion score is the sum * of all members' promotion scores. 
*/ if (rsc->children != NULL) { int score = 0; for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; bool child_default = false; int child_score = promotion_score(child, node, &child_default); if (!child_default && (is_default != NULL)) { *is_default = false; } score += child_score; } return score; } if (!promotion_score_applies(rsc, node)) { return 0; } /* For the promotion score attribute name, use the name the resource is * known as in resource history, since that's what crm_attribute --promotion * would have used. */ name = (rsc->clone_name == NULL)? rsc->id : rsc->clone_name; attr_value = promotion_attr_value(rsc, node, name); if (attr_value != NULL) { pe_rsc_trace(rsc, "Promotion score for %s on %s = %s", name, node->details->uname, pcmk__s(attr_value, "(unset)")); } else if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { /* If we don't have any resource history yet, we won't have clone_name. * In that case, for anonymous clones, try the resource name without * any instance number. */ name = clone_strip(rsc->id); if (strcmp(rsc->id, name) != 0) { attr_value = promotion_attr_value(rsc, node, name); pe_rsc_trace(rsc, "Promotion score for %s on %s (for %s) = %s", name, node->details->uname, rsc->id, pcmk__s(attr_value, "(unset)")); } free(name); } if (attr_value == NULL) { return 0; } if (is_default != NULL) { *is_default = false; } return char2score(attr_value); } /*! * \internal * \brief Include promotion scores in instances' node weights and priorities * * \param[in] rsc Promotable clone resource to update */ void pcmk__add_promotion_scores(pe_resource_t *rsc) { if (pe__set_clone_flag(rsc, pe__clone_promotion_added) == pcmk_rc_already) { return; } for (GList *iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child_rsc = (pe_resource_t *) iter->data; GHashTableIter iter; pe_node_t *node = NULL; int score, new_score; g_hash_table_iter_init(&iter, child_rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (!pcmk__node_available(node, false, false)) { /* This node will never be promoted, so don't apply the * promotion score, as that may lead to clone shuffling. */ continue; } score = promotion_score(child_rsc, node, NULL); if (score > 0) { new_score = pcmk__add_scores(node->weight, score); if (new_score != node->weight) { pe_rsc_trace(rsc, "Adding promotion score to preference " "for %s on %s (%d->%d)", child_rsc->id, node->details->uname, node->weight, new_score); node->weight = new_score; } } if (score > child_rsc->priority) { pe_rsc_trace(rsc, "Updating %s priority to promotion score (%d->%d)", child_rsc->id, child_rsc->priority, score); child_rsc->priority = score; } } } } /*! * \internal * \brief If a resource's current role is started, change it to unpromoted * * \param[in] data Resource to update * \param[in] user_data Ignored */ static void set_current_role_unpromoted(void *data, void *user_data) { pe_resource_t *rsc = (pe_resource_t *) data; if (rsc->role == RSC_ROLE_STARTED) { // Promotable clones should use unpromoted role instead of started rsc->role = RSC_ROLE_UNPROMOTED; } g_list_foreach(rsc->children, set_current_role_unpromoted, NULL); } /*! 
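For an instance of a cloned group, promotion_score() recurses into the members, sums their scores, and treats the result as explicitly set if any member had an explicit score. A standalone sketch of that recursion over a mock resource tree (no attribute lookups and no Pacemaker types; the mock_rsc struct is illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct mock_rsc {
    const char *id;
    int score;              // this resource's own promotion score
    bool has_score;         // whether the score was explicitly set
    struct mock_rsc *children[4];
    int n_children;
};

// Sum a resource's promotion score, recursing into group members (sketch)
static int
promotion_score(const struct mock_rsc *rsc, bool *is_default)
{
    if (is_default != NULL) {
        *is_default = true;
    }
    if (rsc->n_children > 0) {
        int total = 0;

        for (int i = 0; i < rsc->n_children; i++) {
            bool child_default = false;

            total += promotion_score(rsc->children[i], &child_default);
            if (!child_default && (is_default != NULL)) {
                *is_default = false;  // any explicit member score makes the sum explicit
            }
        }
        return total;
    }
    if (rsc->has_score && (is_default != NULL)) {
        *is_default = false;
    }
    return rsc->has_score? rsc->score : 0;
}

int
main(void)
{
    struct mock_rsc db  = { "db",  10, true,  { 0 }, 0 };
    struct mock_rsc vip = { "vip",  0, false, { 0 }, 0 };
    struct mock_rsc grp = { "grp",  0, false, { &db, &vip }, 2 };
    bool is_default = true;

    printf("group score = %d (default: %s)\n",
           promotion_score(&grp, &is_default), is_default? "yes" : "no");
    return 0;
}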
* \internal * \brief Set a resource's next role to unpromoted (or stopped if unassigned) * * \param[in] data Resource to update * \param[in] user_data Ignored */ static void set_next_role_unpromoted(void *data, void *user_data) { pe_resource_t *rsc = (pe_resource_t *) data; GList *assigned = NULL; rsc->fns->location(rsc, &assigned, FALSE); if (assigned == NULL) { pe__set_next_role(rsc, RSC_ROLE_STOPPED, "stopped instance"); } else { pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED, "unpromoted instance"); g_list_free(assigned); } g_list_foreach(rsc->children, set_next_role_unpromoted, NULL); } /*! * \internal * \brief Set a resource's next role to promoted if not already set * * \param[in] data Resource to update * \param[in] user_data Ignored */ static void set_next_role_promoted(void *data, gpointer user_data) { pe_resource_t *rsc = (pe_resource_t *) data; if (rsc->next_role == RSC_ROLE_UNKNOWN) { pe__set_next_role(rsc, RSC_ROLE_PROMOTED, "promoted instance"); } g_list_foreach(rsc->children, set_next_role_promoted, NULL); } /*! * \internal * \brief Show instance's promotion score on node where it will be active * * \param[in] instance Promotable clone instance to show */ static void show_promotion_score(pe_resource_t *instance) { pe_node_t *chosen = instance->fns->location(instance, NULL, FALSE); if (pcmk_is_set(instance->cluster->flags, pe_flag_show_scores) && !pcmk__is_daemon && (instance->cluster->priv != NULL)) { pcmk__output_t *out = instance->cluster->priv; out->message(out, "promotion-score", instance, chosen, pcmk_readable_score(instance->sort_index)); } else { pe_rsc_debug(uber_parent(instance), "%s promotion score on %s: sort=%s priority=%s", instance->id, ((chosen == NULL)? "none" : chosen->details->uname), pcmk_readable_score(instance->sort_index), pcmk_readable_score(instance->priority)); } } /*! * \internal * \brief Set a clone instance's promotion priority * * \param[in] data Promotable clone instance to update * \param[in] user_data Instance's parent clone */ static void set_instance_priority(gpointer data, gpointer user_data) { pe_resource_t *instance = (pe_resource_t *) data; pe_resource_t *clone = (pe_resource_t *) user_data; pe_node_t *chosen = NULL; enum rsc_role_e next_role = RSC_ROLE_UNKNOWN; GList *list = NULL; pe_rsc_trace(clone, "Assigning priority for %s: %s", instance->id, role2text(instance->next_role)); if (instance->fns->state(instance, TRUE) == RSC_ROLE_STARTED) { set_current_role_unpromoted(instance, NULL); } // Only an instance that will be active can be promoted chosen = instance->fns->location(instance, &list, FALSE); if (pcmk__list_of_multiple(list)) { pcmk__config_err("Cannot promote non-colocated child %s", instance->id); } g_list_free(list); if (chosen == NULL) { return; } next_role = instance->fns->state(instance, FALSE); switch (next_role) { case RSC_ROLE_STARTED: case RSC_ROLE_UNKNOWN: // Set instance priority to its promotion score (or -1 if none) { bool is_default = false; instance->priority = promotion_score(instance, chosen, &is_default); if (is_default) { /* * Default to -1 if no value is set. 
This allows * instances eligible for promotion to be specified * based solely on rsc_location constraints, but * prevents any instance from being promoted if neither * a constraint nor a promotion score is present */ instance->priority = -1; } } break; case RSC_ROLE_UNPROMOTED: case RSC_ROLE_STOPPED: // Instance can't be promoted instance->priority = -INFINITY; break; case RSC_ROLE_PROMOTED: // Nothing needed (re-creating actions after scheduling fencing) break; default: CRM_CHECK(FALSE, crm_err("Unknown resource role %d for %s", next_role, instance->id)); } // Add relevant location constraint scores for promoted role apply_promoted_locations(instance, instance->rsc_location, chosen); apply_promoted_locations(instance, clone->rsc_location, chosen); // Apply relevant colocations with promoted role for (GList *iter = instance->rsc_cons; iter != NULL; iter = iter->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) iter->data; instance->cmds->rsc_colocation_lh(instance, cons->primary, cons, instance->cluster); } instance->sort_index = instance->priority; if (next_role == RSC_ROLE_PROMOTED) { instance->sort_index = INFINITY; } pe_rsc_trace(clone, "Assigning %s priority = %d", instance->id, instance->priority); } /*! * \internal * \brief Set a promotable clone instance's role * * \param[in] data Promotable clone instance to update * \param[in] user_data Pointer to count of instances chosen for promotion */ static void set_instance_role(gpointer data, gpointer user_data) { pe_resource_t *instance = (pe_resource_t *) data; int *count = (int *) user_data; pe_resource_t *clone = uber_parent(instance); pe_node_t *chosen = NULL; show_promotion_score(instance); if (instance->sort_index < 0) { pe_rsc_trace(clone, "Not supposed to promote instance %s", instance->id); } else if ((*count < pe__clone_promoted_max(instance)) || !pcmk_is_set(clone->flags, pe_rsc_managed)) { chosen = node_to_be_promoted_on(instance); } if (chosen == NULL) { set_next_role_unpromoted(instance, NULL); return; } if ((instance->role < RSC_ROLE_PROMOTED) && !pcmk_is_set(instance->cluster->flags, pe_flag_have_quorum) && (instance->cluster->no_quorum_policy == no_quorum_freeze)) { crm_notice("Clone instance %s cannot be promoted without quorum", instance->id); set_next_role_unpromoted(instance, NULL); return; } chosen->count++; pe_rsc_info(clone, "Choosing %s (%s) on %s for promotion", instance->id, role2text(instance->role), chosen->details->uname); set_next_role_promoted(instance, NULL); (*count)++; } /*! 
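set_instance_role() is applied to the instances in their sorted order: anything with a negative sort index stays unpromoted, and otherwise instances are promoted until promoted-max is reached (with an extra quorum check under the freeze policy). A standalone sketch of that "promote at most N of the sorted instances" loop, leaving out the quorum and unmanaged special cases:

#include <stdbool.h>
#include <stdio.h>

struct mock_instance {
    const char *id;
    int sort_index;   // instances are assumed already sorted by this, descending
    bool eligible;    // stand-in for node_to_be_promoted_on() returning a node
};

int
main(void)
{
    struct mock_instance sorted[] = {
        { "rsc:2", 200, true },
        { "rsc:0", 100, true },
        { "rsc:1", -1,  true },   // negative sort index: never promoted
    };
    const int promoted_max = 1;   // clone's promoted-max
    int promoted = 0;

    for (int i = 0; i < 3; i++) {
        struct mock_instance *inst = &sorted[i];

        if ((inst->sort_index >= 0) && inst->eligible
            && (promoted < promoted_max)) {
            printf("%s: promote\n", inst->id);
            promoted++;
        } else {
            printf("%s: keep unpromoted\n", inst->id);
        }
    }
    printf("promoted %d of a possible %d\n", promoted, promoted_max);
    return 0;
}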
* \internal * \brief Set roles for all instances of a promotable clone * * \param[in] clone Promotable clone resource to update */ void pcmk__set_instance_roles(pe_resource_t *rsc) { int promoted = 0; GHashTableIter iter; pe_node_t *node = NULL; // Repurpose count to track the number of promoted instances allocated g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { node->count = 0; } // Set instances' promotion priorities and sort by highest priority first g_list_foreach(rsc->children, set_instance_priority, rsc); sort_promotable_instances(rsc); // Choose the first N eligible instances to be promoted g_list_foreach(rsc->children, set_instance_role, &promoted); pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d", rsc->id, promoted, pe__clone_promoted_max(rsc)); } void create_promotable_actions(pe_resource_t * rsc, pe_working_set_t * data_set) { pe_action_t *action = NULL; GList *gIter = rsc->children; pe_action_t *action_complete = NULL; bool any_promoting = false; bool any_demoting = false; clone_variant_data_t *clone_data = NULL; get_clone_variant_data(clone_data, rsc); pe_rsc_debug(rsc, "Creating actions for %s", rsc->id); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; pe_rsc_trace(rsc, "Creating actions for %s", child_rsc->id); child_rsc->cmds->create_actions(child_rsc, data_set); check_for_role_change(child_rsc, &any_demoting, &any_promoting); } /* promote */ - action = pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTE, !any_promoting, - true); - action_complete = pcmk__new_rsc_pseudo_action(rsc, RSC_PROMOTED, - !any_promoting, true); + action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, !any_promoting, true); + action_complete = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, + !any_promoting, true); action_complete->priority = INFINITY; if (clone_data->promote_notify == NULL) { - clone_data->promote_notify = pcmk__clone_notif_pseudo_ops(rsc, - RSC_PROMOTE, - action, - action_complete); + clone_data->promote_notify = pe__clone_notif_pseudo_ops(rsc, + RSC_PROMOTE, + action, + action_complete); } /* demote */ - action = pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTE, !any_demoting, true); - action_complete = pcmk__new_rsc_pseudo_action(rsc, RSC_DEMOTED, - !any_demoting, true); + action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, !any_demoting, true); + action_complete = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, !any_demoting, + true); action_complete->priority = INFINITY; if (clone_data->demote_notify == NULL) { - clone_data->demote_notify = pcmk__clone_notif_pseudo_ops(rsc, - RSC_DEMOTE, - action, - action_complete); + clone_data->demote_notify = pe__clone_notif_pseudo_ops(rsc, RSC_DEMOTE, + action, + action_complete); if (clone_data->promote_notify) { /* If we ever wanted groups to have notifications we'd need to move this to native_internal_constraints() one day * Requires exposing *_notify */ order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre, pe_order_optional); order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre, pe_order_optional); } } /* restore the correct priority */ gIter = rsc->children; for 
(; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; child_rsc->priority = rsc->priority; } } void promote_demote_constraints(pe_resource_t *rsc, pe_working_set_t *data_set) { /* global stopped before start */ pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set); /* global stopped before promote */ pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_PROMOTE, pe_order_optional, data_set); /* global demoted before start */ pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_START, pe_order_optional, data_set); /* global started before promote */ pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_optional, data_set); /* global demoted before stop */ pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set); /* global demote before demoted */ pcmk__order_resource_actions(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_optional, data_set); /* global demoted before promote */ pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE, pe_order_optional, data_set); } void promotable_constraints(pe_resource_t * rsc, pe_working_set_t * data_set) { GList *gIter = rsc->children; pe_resource_t *last_rsc = NULL; promote_demote_constraints(rsc, data_set); for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; /* child demote before promote */ pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, child_rsc, RSC_PROMOTE, pe_order_optional, data_set); order_instance_promotion(rsc, child_rsc, last_rsc); order_instance_demotion(rsc, child_rsc, last_rsc); last_rsc = child_rsc; } } static void node_hash_update_one(GHashTable * hash, pe_node_t * other, const char *attr, int score) { GHashTableIter iter; pe_node_t *node = NULL; const char *value = NULL; if (other == NULL) { return; } else if (attr == NULL) { attr = CRM_ATTR_UNAME; } value = pe_node_attribute_raw(other, attr); g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { const char *tmp = pe_node_attribute_raw(node, attr); if (pcmk__str_eq(value, tmp, pcmk__str_casei)) { crm_trace("%s: %d + %d", node->details->uname, node->weight, other->weight); node->weight = pcmk__add_scores(node->weight, score); } } } void promotable_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set) { GList *gIter = NULL; if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) { GList *affected_nodes = NULL; for (gIter = primary->children; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE); enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, FALSE); pe_rsc_trace(primary, "Processing: %s", child_rsc->id); if ((chosen != NULL) && (next_role == constraint->primary_role)) { pe_rsc_trace(primary, "Applying: %s %s %s %d", child_rsc->id, role2text(next_role), chosen->details->uname, constraint->score); if (constraint->score < INFINITY) { node_hash_update_one(dependent->allowed_nodes, chosen, constraint->node_attribute, constraint->score); } affected_nodes = g_list_prepend(affected_nodes, chosen); } } /* Only do this if it's not a promoted-with-promoted colocation. Doing * this unconditionally would prevent unpromoted instances from being * started. 
*/ if ((constraint->dependent_role != RSC_ROLE_PROMOTED) || (constraint->primary_role != RSC_ROLE_PROMOTED)) { if (constraint->score >= INFINITY) { node_list_exclude(dependent->allowed_nodes, affected_nodes, TRUE); } } g_list_free(affected_nodes); } else if (constraint->dependent_role == RSC_ROLE_PROMOTED) { pe_resource_t *primary_instance; primary_instance = find_compatible_child(dependent, primary, constraint->primary_role, FALSE, data_set); if ((primary_instance == NULL) && (constraint->score >= INFINITY)) { pe_rsc_trace(dependent, "%s can't be promoted %s", dependent->id, constraint->id); dependent->priority = -INFINITY; } else if (primary_instance != NULL) { int new_priority = pcmk__add_scores(dependent->priority, constraint->score); pe_rsc_debug(dependent, "Applying %s to %s", constraint->id, dependent->id); pe_rsc_debug(dependent, "\t%s: %d->%d", dependent->id, dependent->priority, new_priority); dependent->priority = new_priority; } } return; } diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am index 5ff167b49c..2badbaec99 100644 --- a/lib/pengine/Makefile.am +++ b/lib/pengine/Makefile.am @@ -1,53 +1,55 @@ # # Copyright 2004-2022 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. # include $(top_srcdir)/mk/common.mk SUBDIRS = tests ## libraries lib_LTLIBRARIES = libpe_rules.la libpe_status.la ## SOURCES noinst_HEADERS = variant.h pe_status_private.h libpe_rules_la_LDFLAGS = -version-info 29:2:3 libpe_rules_la_CFLAGS = $(CFLAGS_HARDENED_LIB) libpe_rules_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la libpe_rules_la_SOURCES = rules.c rules_alerts.c common.c libpe_status_la_LDFLAGS = -version-info 33:0:5 libpe_status_la_CFLAGS = $(CFLAGS_HARDENED_LIB) libpe_status_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) libpe_status_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la # Use += rather than backslashed continuation lines for parsing by bumplibs libpe_status_la_SOURCES = libpe_status_la_SOURCES += bundle.c libpe_status_la_SOURCES += clone.c libpe_status_la_SOURCES += common.c libpe_status_la_SOURCES += complex.c libpe_status_la_SOURCES += failcounts.c libpe_status_la_SOURCES += group.c -libpe_status_la_SOURCES += pe_health.c libpe_status_la_SOURCES += native.c +libpe_status_la_SOURCES += pe_actions.c +libpe_status_la_SOURCES += pe_health.c libpe_status_la_SOURCES += pe_digest.c +libpe_status_la_SOURCES += pe_notif.c +libpe_status_la_SOURCES += pe_output.c libpe_status_la_SOURCES += remote.c libpe_status_la_SOURCES += rules.c libpe_status_la_SOURCES += status.c libpe_status_la_SOURCES += tags.c libpe_status_la_SOURCES += unpack.c libpe_status_la_SOURCES += utils.c -libpe_status_la_SOURCES += pe_output.c clean-generic: rm -f *.log *.debug *~ diff --git a/lib/pengine/utils.c b/lib/pengine/pe_actions.c similarity index 68% copy from lib/pengine/utils.c copy to lib/pengine/pe_actions.c index 26c8062faf..0c2eb3c161 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/pe_actions.c @@ -1,2617 +1,1755 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/ #include -#include -#include -#include -#include -#include #include #include -#include +#include +#include #include #include "pe_status_private.h" -extern bool pcmk__is_daemon; +static void unpack_operation(pe_action_t *action, xmlNode *xml_obj, + pe_resource_t *container, + pe_working_set_t *data_set, guint interval_ms); -void print_str_str(gpointer key, gpointer value, gpointer user_data); -gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); -static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, - pe_working_set_t * data_set, guint interval_ms); -static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, - gboolean include_disabled); - -#if ENABLE_VERSIONED_ATTRS -pe_rsc_action_details_t * -pe_rsc_action_details(pe_action_t *action) +static void +add_singleton(pe_working_set_t *data_set, pe_action_t *action) { - pe_rsc_action_details_t *details; - - CRM_CHECK(action != NULL, return NULL); - - if (action->action_details == NULL) { - action->action_details = calloc(1, sizeof(pe_rsc_action_details_t)); - CRM_CHECK(action->action_details != NULL, return NULL); - } - - details = (pe_rsc_action_details_t *) action->action_details; - if (details->versioned_parameters == NULL) { - details->versioned_parameters = create_xml_node(NULL, - XML_TAG_OP_VER_ATTRS); - } - if (details->versioned_meta == NULL) { - details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META); + if (data_set->singletons == NULL) { + data_set->singletons = pcmk__strkey_table(NULL, NULL); } - return details; + g_hash_table_insert(data_set->singletons, action->uuid, action); } -static void -pe_free_rsc_action_details(pe_action_t *action) +static pe_action_t * +lookup_singleton(pe_working_set_t *data_set, const char *action_uuid) { - pe_rsc_action_details_t *details; - - if ((action == NULL) || (action->action_details == NULL)) { - return; - } - - details = (pe_rsc_action_details_t *) action->action_details; - - if (details->versioned_parameters) { - free_xml(details->versioned_parameters); - } - if (details->versioned_meta) { - free_xml(details->versioned_meta); + if (data_set->singletons == NULL) { + return NULL; } - - action->action_details = NULL; + return g_hash_table_lookup(data_set->singletons, action_uuid); } -#endif /*! * \internal - * \brief Check whether we can fence a particular node + * \brief Find an existing action that matches arguments * - * \param[in] data_set Working set for cluster - * \param[in] node Name of node to check + * \param[in] key Action key to match + * \param[in] rsc Resource to match (if any) + * \param[in] node Node to match (if any) + * \param[in] data_set Cluster working set * - * \return true if node can be fenced, false otherwise + * \return Existing action that matches arguments (or NULL if none) */ -bool -pe_can_fence(pe_working_set_t *data_set, pe_node_t *node) +static pe_action_t * +find_existing_action(const char *key, pe_resource_t *rsc, pe_node_t *node, + pe_working_set_t *data_set) { - if (pe__is_guest_node(node)) { - /* Guest nodes are fenced by stopping their container resource. We can - * do that if the container's host is either online or fenceable. - */ - pe_resource_t *rsc = node->details->remote_rsc->container; + GList *matches = NULL; + pe_action_t *action = NULL; + + /* When rsc is NULL, it would be quicker to check data_set->singletons, + * but checking all data_set->actions takes the node into account. + */ + matches = find_actions(((rsc == NULL)? 
data_set->actions : rsc->actions), + key, node); + if (matches == NULL) { + return NULL; + } + CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches)); + + action = matches->data; + g_list_free(matches); + return action; +} + +static xmlNode * +find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled) +{ + guint interval_ms = 0; + gboolean do_retry = TRUE; + char *local_key = NULL; + const char *name = NULL; + const char *interval_spec = NULL; + char *match_key = NULL; + xmlNode *op = NULL; + xmlNode *operation = NULL; + + retry: + for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL; + operation = pcmk__xe_next(operation)) { - for (GList *n = rsc->running_on; n != NULL; n = n->next) { - pe_node_t *container_node = n->data; + if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { + bool enabled = false; - if (!container_node->details->online - && !pe_can_fence(data_set, container_node)) { - return false; + name = crm_element_value(operation, "name"); + interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); + if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && + !enabled) { + continue; } - } - return true; - } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { - return false; /* Turned off */ + interval_ms = crm_parse_interval_spec(interval_spec); + match_key = pcmk__op_key(rsc->id, name, interval_ms); + if (pcmk__str_eq(key, match_key, pcmk__str_casei)) { + op = operation; + } + free(match_key); - } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) { - return false; /* No devices */ + if (rsc->clone_name) { + match_key = pcmk__op_key(rsc->clone_name, name, interval_ms); + if (pcmk__str_eq(key, match_key, pcmk__str_casei)) { + op = operation; + } + free(match_key); + } - } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) { - return true; + if (op != NULL) { + free(local_key); + return op; + } + } + } - } else if (data_set->no_quorum_policy == no_quorum_ignore) { - return true; + free(local_key); + if (do_retry == FALSE) { + return NULL; + } - } else if(node == NULL) { - return false; + do_retry = FALSE; + if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { + local_key = pcmk__op_key(rsc->id, "migrate", 0); + key = local_key; + goto retry; - } else if(node->details->online) { - crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); - return true; + } else if (strstr(key, "_notify_")) { + local_key = pcmk__op_key(rsc->id, "notify", 0); + key = local_key; + goto retry; } - crm_trace("Cannot fence %s", node->details->uname); - return false; + return NULL; +} + +xmlNode * +find_rsc_op_entry(pe_resource_t * rsc, const char *key) +{ + return find_rsc_op_entry_helper(rsc, key, FALSE); } /*! * \internal - * \brief Copy a node object + * \brief Create a new action object * - * \param[in] this_node Node object to copy + * \param[in] key Action key + * \param[in] task Action name + * \param[in] rsc Resource that action is for (if any) + * \param[in] node Node that action is on (if any) + * \param[in] optional Whether action should be considered optional + * \param[in] for_graph Whether action should be recorded in transition graph + * \param[in] data_set Cluster working set * - * \return Newly allocated shallow copy of this_node - * \note This function asserts on errors and is guaranteed to return non-NULL. 
+ * \return Newly allocated action + * \note This function takes ownership of \p key. It is the caller's + * responsibility to free the return value with pe_free_action(). */ -pe_node_t * -pe__copy_node(const pe_node_t *this_node) +static pe_action_t * +new_action(char *key, const char *task, pe_resource_t *rsc, pe_node_t *node, + bool optional, bool for_graph, pe_working_set_t *data_set) { - pe_node_t *new_node = NULL; - - CRM_ASSERT(this_node != NULL); - - new_node = calloc(1, sizeof(pe_node_t)); - CRM_ASSERT(new_node != NULL); - - new_node->rsc_discover_mode = this_node->rsc_discover_mode; - new_node->weight = this_node->weight; - new_node->fixed = this_node->fixed; - new_node->details = this_node->details; + pe_action_t *action = calloc(1, sizeof(pe_action_t)); - return new_node; -} + CRM_ASSERT(action != NULL); -/* any node in list1 or list2 and not in the other gets a score of -INFINITY */ -void -node_list_exclude(GHashTable * hash, GList *list, gboolean merge_scores) -{ - GHashTable *result = hash; - pe_node_t *other_node = NULL; - GList *gIter = list; + action->rsc = rsc; + action->task = strdup(task); CRM_ASSERT(action->task != NULL); + action->uuid = key; + action->extra = pcmk__strkey_table(free, free); + action->meta = pcmk__strkey_table(free, free); - GHashTableIter iter; - pe_node_t *node = NULL; + if (node) { + action->node = pe__copy_node(node); + } - g_hash_table_iter_init(&iter, hash); - while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { + if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { + // Resource history deletion for a node can be done on the DC + pe__set_action_flags(action, pe_action_dc); + } - other_node = pe_find_node_id(list, node->details->id); - if (other_node == NULL) { - node->weight = -INFINITY; - } else if (merge_scores) { - node->weight = pcmk__add_scores(node->weight, other_node->weight); - } + pe__set_action_flags(action, pe_action_runnable); + if (optional) { + pe__set_action_flags(action, pe_action_optional); + } else { + pe__clear_action_flags(action, pe_action_optional); } - for (; gIter != NULL; gIter = gIter->next) { - pe_node_t *node = (pe_node_t *) gIter->data; + if (rsc != NULL) { + guint interval_ms = 0; - other_node = pe_hash_table_lookup(result, node->details->id); + action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); + parse_op_key(key, NULL, NULL, &interval_ms); + unpack_operation(action, action->op_entry, rsc->container, data_set, + interval_ms); + } - if (other_node == NULL) { - pe_node_t *new_node = pe__copy_node(node); + if (for_graph) { + pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s", + (optional? "optional" : "required"), + data_set->action_id, key, task, + ((rsc == NULL)? "no resource" : rsc->id), + ((node == NULL)? "no node" : node->details->uname)); + action->id = data_set->action_id++; - new_node->weight = -INFINITY; - g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); + data_set->actions = g_list_prepend(data_set->actions, action); + if (rsc == NULL) { + add_singleton(data_set, action); + } else { + rsc->actions = g_list_prepend(rsc->actions, action); } } + return action; } /*! 
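new_action() always starts an action out as runnable and marks it optional only when the caller asked for that; otherwise the optional bit is cleared explicitly. A standalone sketch of that flag defaulting with plain bit masks (the flag values and mock_action struct are illustrative, not the real enum pe_action_flags or pe_action_t):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Illustrative flag bits (not the real enum pe_action_flags values)
#define ACTION_RUNNABLE (UINT32_C(1) << 0)
#define ACTION_OPTIONAL (UINT32_C(1) << 1)

struct mock_action {
    char *task;
    uint32_t flags;
};

static struct mock_action *
new_action(const char *task, bool optional)
{
    struct mock_action *action = calloc(1, sizeof(*action));

    if (action == NULL) {
        abort();
    }
    action->task = strdup(task);
    action->flags = ACTION_RUNNABLE;          // new actions start out runnable
    if (optional) {
        action->flags |= ACTION_OPTIONAL;     // optional if the caller says so
    } else {
        action->flags &= ~ACTION_OPTIONAL;    // otherwise explicitly required
    }
    return action;
}

int
main(void)
{
    struct mock_action *start = new_action("start", false);

    printf("start: runnable=%d optional=%d\n",
           (start->flags & ACTION_RUNNABLE) != 0,
           (start->flags & ACTION_OPTIONAL) != 0);
    free(start->task);
    free(start);
    return 0;
}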
* \internal - * \brief Create a node hash table from a node list - * - * \param[in] list Node list + * \brief Evaluate node attribute values for an action * - * \return Hash table equivalent of node list + * \param[in] action Action to unpack attributes for + * \param[in] data_set Cluster working set */ -GHashTable * -pe__node_list2table(GList *list) +static void +unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set) { - GHashTable *result = NULL; + if (!pcmk_is_set(action->flags, pe_action_have_node_attrs) + && (action->op_entry != NULL)) { - result = pcmk__strkey_table(NULL, free); - for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { - pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data); + pe_rule_eval_data_t rule_data = { + .node_hash = action->node->details->attrs, + .role = RSC_ROLE_UNKNOWN, + .now = data_set->now, + .match_data = NULL, + .rsc_data = NULL, + .op_data = NULL + }; - g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); + pe__set_action_flags(action, pe_action_have_node_attrs); + pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, + &rule_data, action->extra, NULL, + FALSE, data_set); } - return result; -} - -gint -sort_node_uname(gconstpointer a, gconstpointer b) -{ - return pcmk__numeric_strcasecmp(((const pe_node_t *) a)->details->uname, - ((const pe_node_t *) b)->details->uname); } /*! * \internal - * \brief Output node weights to stdout + * \brief Update an action's optional flag * - * \param[in] rsc Use allowed nodes for this resource - * \param[in] comment Text description to prefix lines with - * \param[in] nodes If rsc is not specified, use these nodes + * \param[in] action Action to update + * \param[in] optional Requested optional status */ static void -pe__output_node_weights(pe_resource_t *rsc, const char *comment, - GHashTable *nodes, pe_working_set_t *data_set) +update_action_optional(pe_action_t *action, gboolean optional) { - pcmk__output_t *out = data_set->priv; - - // Sort the nodes so the output is consistent for regression tests - GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname); - - for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { - pe_node_t *node = (pe_node_t *) gIter->data; + // Force a non-recurring action to be optional if its resource is unmanaged + if ((action->rsc != NULL) && (action->node != NULL) + && !pcmk_is_set(action->flags, pe_action_pseudo) + && !pcmk_is_set(action->rsc->flags, pe_rsc_managed) + && (g_hash_table_lookup(action->meta, + XML_LRM_ATTR_INTERVAL_MS) == NULL)) { + pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)", + action->uuid, action->node->details->uname, + action->rsc->id); + pe__set_action_flags(action, pe_action_optional); + // We shouldn't clear runnable here because ... something - out->message(out, "node-weight", rsc, comment, node->details->uname, - pcmk_readable_score(node->weight)); + // Otherwise require the action if requested + } else if (!optional) { + pe__clear_action_flags(action, pe_action_optional); } - g_list_free(list); -} - -/*! 
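update_action_optional() forces a non-recurring action on an unmanaged resource to be optional, and otherwise only clears the optional bit when a required action was requested. A standalone sketch of that decision, with a boolean standing in for the interval meta-attribute lookup and an illustrative mock_action struct:

#include <stdbool.h>
#include <stdio.h>

struct mock_action {
    const char *uuid;
    bool pseudo;       // pseudo-actions are left alone
    bool rsc_managed;  // whether the action's resource is managed
    bool recurring;    // stand-in for a nonzero interval meta-attribute
    bool optional;
};

static void
update_action_optional(struct mock_action *action, bool requested_optional)
{
    if (!action->pseudo && !action->rsc_managed && !action->recurring) {
        // Non-recurring action on an unmanaged resource: force it optional
        action->optional = true;
    } else if (!requested_optional) {
        // Otherwise honour a request for a required action
        action->optional = false;
    }
    printf("%s: optional=%d\n", action->uuid, action->optional);
}

int
main(void)
{
    struct mock_action stop = { "rsc_stop_0", false, false, false, false };
    struct mock_action mon  = { "rsc_monitor_10000", false, false, true, true };

    update_action_optional(&stop, false);  // forced optional (unmanaged, non-recurring)
    update_action_optional(&mon, false);   // required, as requested
    return 0;
}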
- * \internal - * \brief Log node weights at trace level - * - * \param[in] file Caller's filename - * \param[in] function Caller's function name - * \param[in] line Caller's line number - * \param[in] rsc Use allowed nodes for this resource - * \param[in] comment Text description to prefix lines with - * \param[in] nodes If rsc is not specified, use these nodes - */ -static void -pe__log_node_weights(const char *file, const char *function, int line, - pe_resource_t *rsc, const char *comment, GHashTable *nodes) -{ - GHashTableIter iter; - pe_node_t *node = NULL; - - // Don't waste time if we're not tracing at this point - pcmk__log_else(LOG_TRACE, return); - - g_hash_table_iter_init(&iter, nodes); - while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { - if (rsc) { - qb_log_from_external_source(function, file, - "%s: %s allocation score on %s: %s", - LOG_TRACE, line, 0, - comment, rsc->id, - node->details->uname, - pcmk_readable_score(node->weight)); - } else { - qb_log_from_external_source(function, file, "%s: %s = %s", - LOG_TRACE, line, 0, - comment, node->details->uname, - pcmk_readable_score(node->weight)); - } - } -} - -/*! - * \internal - * \brief Log or output node weights - * - * \param[in] file Caller's filename - * \param[in] function Caller's function name - * \param[in] line Caller's line number - * \param[in] to_log Log if true, otherwise output - * \param[in] rsc Use allowed nodes for this resource - * \param[in] comment Text description to prefix lines with - * \param[in] nodes Use these nodes - */ -void -pe__show_node_weights_as(const char *file, const char *function, int line, - bool to_log, pe_resource_t *rsc, const char *comment, - GHashTable *nodes, pe_working_set_t *data_set) -{ - if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) { - // Don't show allocation scores for orphans - return; - } - if (nodes == NULL) { - // Nothing to show - return; - } - - if (to_log) { - pe__log_node_weights(file, function, line, rsc, comment, nodes); - } else { - pe__output_node_weights(rsc, comment, nodes, data_set); - } - - // If this resource has children, repeat recursively for each - if (rsc && rsc->children) { - for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { - pe_resource_t *child = (pe_resource_t *) gIter->data; - - pe__show_node_weights_as(file, function, line, to_log, child, - comment, child->allowed_nodes, data_set); - } - } -} - -gint -sort_rsc_priority(gconstpointer a, gconstpointer b) -{ - const pe_resource_t *resource1 = (const pe_resource_t *)a; - const pe_resource_t *resource2 = (const pe_resource_t *)b; - - if (a == NULL && b == NULL) { - return 0; - } - if (a == NULL) { - return 1; - } - if (b == NULL) { - return -1; - } - - if (resource1->priority > resource2->priority) { - return -1; - } - - if (resource1->priority < resource2->priority) { - return 1; - } - - return 0; } static enum pe_quorum_policy effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) { enum pe_quorum_policy policy = data_set->no_quorum_policy; if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) { policy = no_quorum_ignore; } else if (data_set->no_quorum_policy == no_quorum_demote) { switch (rsc->role) { case RSC_ROLE_PROMOTED: case RSC_ROLE_UNPROMOTED: if (rsc->next_role > RSC_ROLE_UNPROMOTED) { pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED, "no-quorum-policy=demote"); } policy = no_quorum_ignore; break; default: policy = no_quorum_stop; break; } } return policy; } -static void -add_singleton(pe_working_set_t *data_set, pe_action_t 
*action) -{ - if (data_set->singletons == NULL) { - data_set->singletons = pcmk__strkey_table(NULL, NULL); - } - g_hash_table_insert(data_set->singletons, action->uuid, action); -} - -static pe_action_t * -lookup_singleton(pe_working_set_t *data_set, const char *action_uuid) -{ - if (data_set->singletons == NULL) { - return NULL; - } - return g_hash_table_lookup(data_set->singletons, action_uuid); -} - -/*! - * \internal - * \brief Find an existing action that matches arguments - * - * \param[in] key Action key to match - * \param[in] rsc Resource to match (if any) - * \param[in] node Node to match (if any) - * \param[in] data_set Cluster working set - * - * \return Existing action that matches arguments (or NULL if none) - */ -static pe_action_t * -find_existing_action(const char *key, pe_resource_t *rsc, pe_node_t *node, - pe_working_set_t *data_set) -{ - GList *matches = NULL; - pe_action_t *action = NULL; - - /* When rsc is NULL, it would be quicker to check data_set->singletons, - * but checking all data_set->actions takes the node into account. - */ - matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions), - key, node); - if (matches == NULL) { - return NULL; - } - CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches)); - - action = matches->data; - g_list_free(matches); - return action; -} - -/*! - * \internal - * \brief Create a new action object - * - * \param[in] key Action key - * \param[in] task Action name - * \param[in] rsc Resource that action is for (if any) - * \param[in] node Node that action is on (if any) - * \param[in] optional Whether action should be considered optional - * \param[in] for_graph Whether action should be recorded in transition graph - * \param[in] data_set Cluster working set - * - * \return Newly allocated action - * \note This function takes ownership of \p key. It is the caller's - * responsibility to free the return value with pe_free_action(). - */ -static pe_action_t * -new_action(char *key, const char *task, pe_resource_t *rsc, pe_node_t *node, - bool optional, bool for_graph, pe_working_set_t *data_set) -{ - pe_action_t *action = calloc(1, sizeof(pe_action_t)); - - CRM_ASSERT(action != NULL); - - action->rsc = rsc; - action->task = strdup(task); CRM_ASSERT(action->task != NULL); - action->uuid = key; - action->extra = pcmk__strkey_table(free, free); - action->meta = pcmk__strkey_table(free, free); - - if (node) { - action->node = pe__copy_node(node); - } - - if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { - // Resource history deletion for a node can be done on the DC - pe__set_action_flags(action, pe_action_dc); - } - - pe__set_action_flags(action, pe_action_runnable); - if (optional) { - pe__set_action_flags(action, pe_action_optional); - } else { - pe__clear_action_flags(action, pe_action_optional); - } - - if (rsc != NULL) { - guint interval_ms = 0; - - action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); - parse_op_key(key, NULL, NULL, &interval_ms); - unpack_operation(action, action->op_entry, rsc->container, data_set, - interval_ms); - } - - if (for_graph) { - pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s", - (optional? "optional" : "required"), - data_set->action_id, key, task, - ((rsc == NULL)? "no resource" : rsc->id), - ((node == NULL)? 
"no node" : node->details->uname)); - action->id = data_set->action_id++; - - data_set->actions = g_list_prepend(data_set->actions, action); - if (rsc == NULL) { - add_singleton(data_set, action); - } else { - rsc->actions = g_list_prepend(rsc->actions, action); - } - } - return action; -} - -/*! - * \internal - * \brief Evaluate node attribute values for an action - * - * \param[in] action Action to unpack attributes for - * \param[in] data_set Cluster working set - */ -static void -unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set) -{ - if (!pcmk_is_set(action->flags, pe_action_have_node_attrs) - && (action->op_entry != NULL)) { - - pe_rule_eval_data_t rule_data = { - .node_hash = action->node->details->attrs, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - pe__set_action_flags(action, pe_action_have_node_attrs); - pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, - &rule_data, action->extra, NULL, - FALSE, data_set); - } -} - -/*! - * \internal - * \brief Update an action's optional flag - * - * \param[in] action Action to update - * \param[in] optional Requested optional status - */ -static void -update_action_optional(pe_action_t *action, gboolean optional) -{ - // Force a non-recurring action to be optional if its resource is unmanaged - if ((action->rsc != NULL) && (action->node != NULL) - && !pcmk_is_set(action->flags, pe_action_pseudo) - && !pcmk_is_set(action->rsc->flags, pe_rsc_managed) - && (g_hash_table_lookup(action->meta, - XML_LRM_ATTR_INTERVAL_MS) == NULL)) { - pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)", - action->uuid, action->node->details->uname, - action->rsc->id); - pe__set_action_flags(action, pe_action_optional); - // We shouldn't clear runnable here because ... something - - // Otherwise require the action if requested - } else if (!optional) { - pe__clear_action_flags(action, pe_action_optional); - } -} - /*! * \internal * \brief Update a resource action's runnable flag * * \param[in] action Action to update * \param[in] for_graph Whether action should be recorded in transition graph * \param[in] data_set Cluster working set * * \note This may also schedule fencing if a stop is unrunnable. */ static void update_resource_action_runnable(pe_action_t *action, bool for_graph, pe_working_set_t *data_set) { if (pcmk_is_set(action->flags, pe_action_pseudo)) { return; } if (action->node == NULL) { pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)", action->uuid); pe__clear_action_flags(action, pe_action_runnable); } else if (!pcmk_is_set(action->flags, pe_action_dc) && !(action->node->details->online) && (!pe__is_guest_node(action->node) || action->node->details->remote_requires_reset)) { pe__clear_action_flags(action, pe_action_runnable); do_crm_log((for_graph? LOG_WARNING: LOG_TRACE), "%s on %s is unrunnable (node is offline)", action->uuid, action->node->details->uname); if (pcmk_is_set(action->rsc->flags, pe_rsc_managed) && for_graph && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei) && !(action->node->details->unclean)) { pe_fence_node(data_set, action->node, "stop is unrunnable", false); } } else if (!pcmk_is_set(action->flags, pe_action_dc) && action->node->details->pending) { pe__clear_action_flags(action, pe_action_runnable); do_crm_log((for_graph? 
LOG_WARNING: LOG_TRACE), "Action %s on %s is unrunnable (node is pending)", action->uuid, action->node->details->uname); } else if (action->needs == rsc_req_nothing) { pe_action_set_reason(action, NULL, TRUE); if (pe__is_guest_node(action->node) && !pe_can_fence(data_set, action->node)) { /* An action that requires nothing usually does not require any * fencing in order to be runnable. However, there is an exception: * such an action cannot be completed if it is on a guest node whose * host is unclean and cannot be fenced. */ pe_rsc_debug(action->rsc, "%s on %s is unrunnable " "(node's host cannot be fenced)", action->uuid, action->node->details->uname); pe__clear_action_flags(action, pe_action_runnable); } else { pe_rsc_trace(action->rsc, "%s on %s does not require fencing or quorum", action->uuid, action->node->details->uname); pe__set_action_flags(action, pe_action_runnable); } } else { switch (effective_quorum_policy(action->rsc, data_set)) { case no_quorum_stop: pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)", action->uuid, action->node->details->uname); pe__clear_action_flags(action, pe_action_runnable); pe_action_set_reason(action, "no quorum", true); break; case no_quorum_freeze: if (!action->rsc->fns->active(action->rsc, TRUE) || (action->rsc->next_role > action->rsc->role)) { pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)", action->uuid, action->node->details->uname); pe__clear_action_flags(action, pe_action_runnable); pe_action_set_reason(action, "quorum freeze", true); } break; default: //pe_action_set_reason(action, NULL, TRUE); pe__set_action_flags(action, pe_action_runnable); break; } } } /*! * \internal * \brief Update a resource object's flags for a new action on it * * \param[in] rsc Resource that action is for (if any) * \param[in] action New action */ static void update_resource_flags_for_action(pe_resource_t *rsc, pe_action_t *action) { /* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used * within Pacemaker, and should be deprecated and eventually removed */ if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) { pe__set_resource_flags(rsc, pe_rsc_stopping); } else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) { if (pcmk_is_set(action->flags, pe_action_runnable)) { pe__set_resource_flags(rsc, pe_rsc_starting); } else { pe__clear_resource_flags(rsc, pe_rsc_starting); } } } -/*! - * \brief Create or update an action object - * - * \param[in] rsc Resource that action is for (if any) - * \param[in] key Action key (must be non-NULL) - * \param[in] task Action name (must be non-NULL) - * \param[in] on_node Node that action is on (if any) - * \param[in] optional Whether action should be considered optional - * \param[in] save_action Whether action should be recorded in transition graph - * \param[in] data_set Cluster working set - * - * \return Action object corresponding to arguments - * \note This function takes ownership of (and might free) \p key. If - * \p save_action is true, \p data_set will own the returned action, - * otherwise it is the caller's responsibility to free the return value - * with pe_free_action(). 
- */ -pe_action_t * -custom_action(pe_resource_t *rsc, char *key, const char *task, - pe_node_t *on_node, gboolean optional, gboolean save_action, - pe_working_set_t *data_set) +static bool +valid_stop_on_fail(const char *value) { - pe_action_t *action = NULL; + return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL); +} - CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL)); - - if (save_action) { - action = find_existing_action(key, rsc, on_node, data_set); - } - - if (action == NULL) { - action = new_action(key, task, rsc, on_node, optional, save_action, - data_set); - } else { - free(key); - } - - update_action_optional(action, optional); - - if (rsc != NULL) { - if (action->node != NULL) { - unpack_action_node_attributes(action, data_set); - } - - update_resource_action_runnable(action, save_action, data_set); - - if (save_action) { - update_resource_flags_for_action(rsc, action); - } - } - - return action; -} - -static bool -valid_stop_on_fail(const char *value) -{ - return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL); -} - -static const char * -unpack_operation_on_fail(pe_action_t * action) -{ - - const char *name = NULL; - const char *role = NULL; - const char *on_fail = NULL; - const char *interval_spec = NULL; - const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); +static const char * +unpack_operation_on_fail(pe_action_t * action) +{ + const char *name = NULL; + const char *role = NULL; + const char *on_fail = NULL; + const char *interval_spec = NULL; + const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei) && !valid_stop_on_fail(value)) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop " "action to default value because '%s' is not " "allowed for stop", action->rsc->id, value); return NULL; } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) { // demote on_fail defaults to monitor value for promoted role if present xmlNode *operation = NULL; CRM_CHECK(action->rsc != NULL, return NULL); for (operation = pcmk__xe_first_child(action->rsc->ops_xml); (operation != NULL) && (value == NULL); operation = pcmk__xe_next(operation)) { bool enabled = false; if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { continue; } name = crm_element_value(operation, "name"); role = crm_element_value(operation, "role"); on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); if (!on_fail) { continue; } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) { continue; } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei) || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S, RSC_ROLE_PROMOTED_LEGACY_S, NULL)) { continue; } else if (crm_parse_interval_spec(interval_spec) == 0) { continue; } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) { continue; } value = on_fail; } } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { value = "ignore"; } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { name = crm_element_value(action->op_entry, "name"); role = crm_element_value(action->op_entry, "role"); interval_spec = crm_element_value(action->op_entry, XML_LRM_ATTR_INTERVAL); if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei) && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei) || !pcmk__strcase_any_of(role, 
RSC_ROLE_PROMOTED_S, RSC_ROLE_PROMOTED_LEGACY_S, NULL) || (crm_parse_interval_spec(interval_spec) == 0))) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s " "action to default value because 'demote' is not " "allowed for it", action->rsc->id, name); return NULL; } } return value; } -static xmlNode * -find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled) -{ - guint interval_ms = 0; - guint min_interval_ms = G_MAXUINT; - const char *name = NULL; - const char *interval_spec = NULL; - xmlNode *op = NULL; - xmlNode *operation = NULL; - - for (operation = pcmk__xe_first_child(rsc->ops_xml); - operation != NULL; - operation = pcmk__xe_next(operation)) { - - if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { - bool enabled = false; - - name = crm_element_value(operation, "name"); - interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); - if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && - !enabled) { - continue; - } - - if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) { - continue; - } - - interval_ms = crm_parse_interval_spec(interval_spec); - - if (interval_ms && (interval_ms < min_interval_ms)) { - min_interval_ms = interval_ms; - op = operation; - } - } - } - - return op; -} - static int -unpack_start_delay(const char *value, GHashTable *meta) +unpack_timeout(const char *value) { - int start_delay = 0; - - if (value != NULL) { - start_delay = crm_get_msec(value); - - if (start_delay < 0) { - start_delay = 0; - } + int timeout_ms = crm_get_msec(value); - if (meta) { - g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), - pcmk__itoa(start_delay)); - } + if (timeout_ms < 0) { + timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); } - - return start_delay; + return timeout_ms; } // true if value contains valid, non-NULL interval origin for recurring op static bool unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms, crm_time_t *now, long long *start_delay) { long long result = 0; guint interval_sec = interval_ms / 1000; crm_time_t *origin = NULL; // Ignore unspecified values and non-recurring operations if ((value == NULL) || (interval_ms == 0) || (now == NULL)) { return false; } // Parse interval origin from text origin = crm_time_new(value); if (origin == NULL) { pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation " "'%s' because '%s' is not valid", (ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value); return false; } // Get seconds since origin (negative if origin is in the future) result = crm_time_get_seconds(now) - crm_time_get_seconds(origin); crm_time_free(origin); // Calculate seconds from closest interval to now result = result % interval_sec; // Calculate seconds remaining until next interval result = ((result <= 0)? 0 : interval_sec) - result; crm_info("Calculated a start delay of %llds for operation '%s'", result, (ID(xml_obj)? 
ID(xml_obj) : "(unspecified)")); if (start_delay != NULL) { *start_delay = result * 1000; // milliseconds } return true; } static int -unpack_timeout(const char *value) +unpack_start_delay(const char *value, GHashTable *meta) { - int timeout_ms = crm_get_msec(value); + int start_delay = 0; - if (timeout_ms < 0) { - timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); + if (value != NULL) { + start_delay = crm_get_msec(value); + + if (start_delay < 0) { + start_delay = 0; + } + + if (meta) { + g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), + pcmk__itoa(start_delay)); + } } - return timeout_ms; + + return start_delay; } -int -pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set) +#if ENABLE_VERSIONED_ATTRS +pe_rsc_action_details_t * +pe_rsc_action_details(pe_action_t *action) { - xmlNode *child = NULL; - GHashTable *action_meta = NULL; - const char *timeout_spec = NULL; - int timeout_ms = 0; + pe_rsc_action_details_t *details; - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; + CRM_CHECK(action != NULL, return NULL); - for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); - child != NULL; child = crm_next_same_xml(child)) { - if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME), - pcmk__str_casei)) { - timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT); - break; - } + if (action->action_details == NULL) { + action->action_details = calloc(1, sizeof(pe_rsc_action_details_t)); + CRM_CHECK(action->action_details != NULL, return NULL); } - if (timeout_spec == NULL && data_set->op_defaults) { - action_meta = pcmk__strkey_table(free, free); - pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, - &rule_data, action_meta, NULL, FALSE, data_set); - timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); + details = (pe_rsc_action_details_t *) action->action_details; + if (details->versioned_parameters == NULL) { + details->versioned_parameters = create_xml_node(NULL, + XML_TAG_OP_VER_ATTRS); } + if (details->versioned_meta == NULL) { + details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META); + } + return details; +} - // @TODO check meta-attributes (including versioned meta-attributes) - // @TODO maybe use min-interval monitor timeout as default for monitors +static void +pe_free_rsc_action_details(pe_action_t *action) +{ + pe_rsc_action_details_t *details; - timeout_ms = crm_get_msec(timeout_spec); - if (timeout_ms < 0) { - timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); + if ((action == NULL) || (action->action_details == NULL)) { + return; } - if (action_meta != NULL) { - g_hash_table_destroy(action_meta); + details = (pe_rsc_action_details_t *) action->action_details; + + if (details->versioned_parameters) { + free_xml(details->versioned_parameters); } - return timeout_ms; + if (details->versioned_meta) { + free_xml(details->versioned_meta); + } + + action->action_details = NULL; } -#if ENABLE_VERSIONED_ATTRS static void unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, guint interval_ms, crm_time_t *now) { xmlNode *attrs = NULL; xmlNode *attr = NULL; for (attrs = pcmk__xe_first_child(versioned_meta); attrs != NULL; attrs = pcmk__xe_next(attrs)) { for (attr = pcmk__xe_first_child(attrs); attr != NULL; attr = pcmk__xe_next(attr)) { const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); const char *value = 
crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); if (pcmk__str_eq(name, XML_OP_ATTR_START_DELAY, pcmk__str_casei)) { int start_delay = unpack_start_delay(value, NULL); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } else if (pcmk__str_eq(name, XML_OP_ATTR_ORIGIN, pcmk__str_casei)) { long long start_delay = 0; if (unpack_interval_origin(value, xml_obj, interval_ms, now, &start_delay)) { crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY); crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay); } } else if (pcmk__str_eq(name, XML_ATTR_TIMEOUT, pcmk__str_casei)) { int timeout_ms = unpack_timeout(value); crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout_ms); } } } } #endif +static xmlNode * +find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled) +{ + guint interval_ms = 0; + guint min_interval_ms = G_MAXUINT; + const char *name = NULL; + const char *interval_spec = NULL; + xmlNode *op = NULL; + xmlNode *operation = NULL; + + for (operation = pcmk__xe_first_child(rsc->ops_xml); + operation != NULL; + operation = pcmk__xe_next(operation)) { + + if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { + bool enabled = false; + + name = crm_element_value(operation, "name"); + interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); + if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && + !enabled) { + continue; + } + + if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) { + continue; + } + + interval_ms = crm_parse_interval_spec(interval_spec); + + if (interval_ms && (interval_ms < min_interval_ms)) { + min_interval_ms = interval_ms; + op = operation; + } + } + } + + return op; +} + /*! * \brief Unpack operation XML into an action structure * * Unpack an operation's meta-attributes (normalizing the interval, timeout, * and start delay values as integer milliseconds), requirements, and * failure policy. 
* * \param[in,out] action Action to unpack into * \param[in] xml_obj Operation XML (or NULL if all defaults) * \param[in] container Resource that contains affected resource, if any * \param[in] data_set Cluster state * \param[in] interval_ms How frequently to perform the operation */ static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, pe_working_set_t * data_set, guint interval_ms) { int timeout_ms = 0; const char *value = NULL; bool is_probe = false; #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t *rsc_details = NULL; #endif pe_rsc_eval_data_t rsc_rule_data = { .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS), .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER), .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE) }; pe_op_eval_data_t op_rule_data = { .op_name = action->task, .interval = interval_ms }; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .role = RSC_ROLE_UNKNOWN, .now = data_set->now, .match_data = NULL, .rsc_data = &rsc_rule_data, .op_data = &op_rule_data }; CRM_CHECK(action && action->rsc, return); is_probe = pcmk_is_probe(action->task, interval_ms); // Cluster-wide pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data, action->meta, NULL, FALSE, data_set); // Determine probe default timeout differently if (is_probe) { xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); if (min_interval_mon) { value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); if (value) { crm_trace("\t%s: Setting default timeout to minimum-interval " "monitor's timeout '%s'", action->uuid, value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), strdup(value)); } } } if (xml_obj) { xmlAttrPtr xIter = NULL; // take precedence over defaults pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data, action->meta, NULL, TRUE, data_set); #if ENABLE_VERSIONED_ATTRS rsc_details = pe_rsc_action_details(action); /* Non-versioned attributes also unpack XML_TAG_ATTR_SETS, but that * capability is deprecated, so we don't need to extend that support to * versioned attributes. */ pe_eval_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, &rule_data, rsc_details->versioned_meta, NULL); #endif /* Anything set as an XML property has highest precedence. * This ensures we use the name and interval from the tag. */ for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = crm_element_value(xml_obj, prop_name); g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); } } g_hash_table_remove(action->meta, "id"); // Normalize interval to milliseconds if (interval_ms > 0) { g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL), crm_strdup_printf("%u", interval_ms)); } else { g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL); } /* * Timeout order of precedence: * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params * and task is start or a probe; pcmk_monitor_timeout works * by default for a recurring monitor) * 2. explicit op timeout on the primitive * 3. default op timeout * a. if probe, then min-interval monitor's timeout * b. else, in XML_CIB_TAG_OPCONFIG * 4. CRM_DEFAULT_OP_TIMEOUT_S * * #1 overrides general rule of XML property having highest * precedence. 
*/ if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard), pcmk_ra_cap_fence_params) && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) || is_probe)) { GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set); value = g_hash_table_lookup(params, "pcmk_monitor_timeout"); if (value) { crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', " "overriding default", action->uuid, value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), strdup(value)); } } // Normalize timeout to positive milliseconds value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); timeout_ms = unpack_timeout(value); g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), pcmk__itoa(timeout_ms)); if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) { action->needs = rsc_req_nothing; value = "nothing (not start or promote)"; } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) { action->needs = rsc_req_stonith; value = "fencing"; } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) { action->needs = rsc_req_quorum; value = "quorum"; } else { action->needs = rsc_req_nothing; value = "nothing"; } pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value); value = unpack_operation_on_fail(action); if (value == NULL) { } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) { action->on_fail = action_fail_block; g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); value = "block"; // The above could destroy the original string } else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) { action->on_fail = action_fail_fence; value = "node fencing"; if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for " "operation '%s' to 'stop' because 'fence' is not " "valid when fencing is disabled", action->uuid); action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } } else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) { action->on_fail = action_fail_standby; value = "node standby"; } else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING, NULL)) { action->on_fail = action_fail_ignore; value = "ignore"; } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) { action->on_fail = action_fail_migrate; value = "force migration"; } else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop resource"; } else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate)"; } else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) { if (container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate)"; } else { value = NULL; } } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { action->on_fail = action_fail_demote; value = "demote instance"; } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; } /* defaults */ if (value == NULL && container) { action->on_fail = action_fail_restart_container; value = "restart container (and possibly migrate) (default)"; /* For remote nodes, ensure that any failure that results in dropping an * active connection to the node results in fencing of the node. * * There are only two action failures that don't result in fencing. * 1. probes - probe failures are expected. * 2. 
start - a start failure indicates that an active connection does not already * exist. The user can set op on-fail=fence if they really want to fence start * failures. */ } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed)) && pe__resource_is_remote_conn(action->rsc, data_set) && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei) && (interval_ms == 0)) && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) { if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) { action->on_fail = action_fail_stop; action->fail_role = RSC_ROLE_STOPPED; value = "stop unmanaged remote node (enforcing default)"; } else { if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { value = "fence remote node (default)"; } else { value = "recover remote node connection (default)"; } if (action->rsc->remote_reconnect_ms) { action->fail_role = RSC_ROLE_STOPPED; } action->on_fail = action_fail_reset_remote; } } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) { if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { action->on_fail = action_fail_fence; value = "resource fence (default)"; } else { action->on_fail = action_fail_block; value = "resource block (default)"; } } else if (value == NULL) { action->on_fail = action_fail_recover; value = "restart (and possibly migrate) (default)"; } pe_rsc_trace(action->rsc, "%s failure handling: %s", action->uuid, value); value = NULL; if (xml_obj != NULL) { value = g_hash_table_lookup(action->meta, "role_after_failure"); if (value) { pe_warn_once(pe_wo_role_after, "Support for role_after_failure is deprecated and will be removed in a future release"); } } if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { action->fail_role = text2role(value); } /* defaults */ - if (action->fail_role == RSC_ROLE_UNKNOWN) { - if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) { - action->fail_role = RSC_ROLE_UNPROMOTED; - } else { - action->fail_role = RSC_ROLE_STARTED; - } - } - pe_rsc_trace(action->rsc, "%s failure results in: %s", - action->uuid, role2text(action->fail_role)); - - value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); - if (value) { - unpack_start_delay(value, action->meta); - } else { - long long start_delay = 0; - - value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); - if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now, - &start_delay)) { - g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY), - crm_strdup_printf("%lld", start_delay)); - } - } - -#if ENABLE_VERSIONED_ATTRS - unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms, - data_set->now); -#endif -} - -static xmlNode * -find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled) -{ - guint interval_ms = 0; - gboolean do_retry = TRUE; - char *local_key = NULL; - const char *name = NULL; - const char *interval_spec = NULL; - char *match_key = NULL; - xmlNode *op = NULL; - xmlNode *operation = NULL; - - retry: - for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL; - operation = pcmk__xe_next(operation)) { - - if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { - bool enabled = false; - - name = crm_element_value(operation, "name"); - interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); - if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && - !enabled) { - continue; - } - - interval_ms = 
crm_parse_interval_spec(interval_spec); - match_key = pcmk__op_key(rsc->id, name, interval_ms); - if (pcmk__str_eq(key, match_key, pcmk__str_casei)) { - op = operation; - } - free(match_key); - - if (rsc->clone_name) { - match_key = pcmk__op_key(rsc->clone_name, name, interval_ms); - if (pcmk__str_eq(key, match_key, pcmk__str_casei)) { - op = operation; - } - free(match_key); - } - - if (op != NULL) { - free(local_key); - return op; - } - } - } - - free(local_key); - if (do_retry == FALSE) { - return NULL; - } - - do_retry = FALSE; - if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { - local_key = pcmk__op_key(rsc->id, "migrate", 0); - key = local_key; - goto retry; - - } else if (strstr(key, "_notify_")) { - local_key = pcmk__op_key(rsc->id, "notify", 0); - key = local_key; - goto retry; - } - - return NULL; -} - -xmlNode * -find_rsc_op_entry(pe_resource_t * rsc, const char *key) -{ - return find_rsc_op_entry_helper(rsc, key, FALSE); -} - -/* - * Used by the HashTable for-loop - */ -void -print_str_str(gpointer key, gpointer value, gpointer user_data) -{ - crm_trace("%s%s %s ==> %s", - user_data == NULL ? "" : (char *)user_data, - user_data == NULL ? "" : ": ", (char *)key, (char *)value); -} - -void -pe_free_action(pe_action_t * action) -{ - if (action == NULL) { - return; - } - g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */ - g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */ - if (action->extra) { - g_hash_table_destroy(action->extra); - } - if (action->meta) { - g_hash_table_destroy(action->meta); - } -#if ENABLE_VERSIONED_ATTRS - if (action->rsc) { - pe_free_rsc_action_details(action); - } -#endif - free(action->cancel_task); - free(action->reason); - free(action->task); - free(action->uuid); - free(action->node); - free(action); -} - -GList * -find_recurring_actions(GList *input, pe_node_t * not_on_node) -{ - const char *value = NULL; - GList *result = NULL; - GList *gIter = input; - - CRM_CHECK(input != NULL, return NULL); - - for (; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = (pe_action_t *) gIter->data; - - value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); - if (value == NULL) { - /* skip */ - } else if (pcmk__str_eq(value, "0", pcmk__str_casei)) { - /* skip */ - } else if (pcmk__str_eq(CRMD_ACTION_CANCEL, action->task, pcmk__str_casei)) { - /* skip */ - } else if (not_on_node == NULL) { - crm_trace("(null) Found: %s", action->uuid); - result = g_list_prepend(result, action); - - } else if (action->node == NULL) { - /* skip */ - } else if (action->node->details != not_on_node->details) { - crm_trace("Found: %s", action->uuid); - result = g_list_prepend(result, action); - } - } - - return result; -} - -enum action_tasks -get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic) -{ - enum action_tasks task = text2task(name); - - if (rsc == NULL) { - return task; - - } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { - switch (task) { - case stopped_rsc: - case started_rsc: - case action_demoted: - case action_promoted: - crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); - return task - 1; - default: - break; - } - } - return task; -} - -pe_action_t * -find_first_action(GList *input, const char *uuid, const char *task, pe_node_t * on_node) -{ - GList *gIter = NULL; - - CRM_CHECK(uuid || task, return NULL); - - for (gIter = input; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = 
(pe_action_t *) gIter->data; - - if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) { - continue; - - } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) { - continue; - - } else if (on_node == NULL) { - return action; - - } else if (action->node == NULL) { - continue; - - } else if (on_node->details == action->node->details) { - return action; - } - } - - return NULL; -} - -GList * -find_actions(GList *input, const char *key, const pe_node_t *on_node) -{ - GList *gIter = input; - GList *result = NULL; - - CRM_CHECK(key != NULL, return NULL); - - for (; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = (pe_action_t *) gIter->data; - - if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) { - continue; - - } else if (on_node == NULL) { - crm_trace("Action %s matches (ignoring node)", key); - result = g_list_prepend(result, action); - - } else if (action->node == NULL) { - crm_trace("Action %s matches (unallocated, assigning to %s)", - key, on_node->details->uname); - - action->node = pe__copy_node(on_node); - result = g_list_prepend(result, action); - - } else if (on_node->details == action->node->details) { - crm_trace("Action %s on %s matches", key, on_node->details->uname); - result = g_list_prepend(result, action); - } - } - - return result; -} - -GList * -find_actions_exact(GList *input, const char *key, const pe_node_t *on_node) -{ - GList *result = NULL; - - CRM_CHECK(key != NULL, return NULL); - - if (on_node == NULL) { - return NULL; - } - - for (GList *gIter = input; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = (pe_action_t *) gIter->data; - - if ((action->node != NULL) - && pcmk__str_eq(key, action->uuid, pcmk__str_casei) - && pcmk__str_eq(on_node->details->id, action->node->details->id, - pcmk__str_casei)) { - - crm_trace("Action %s on %s matches", key, on_node->details->uname); - result = g_list_prepend(result, action); - } - } - - return result; -} - -/*! - * \brief Find all actions of given type for a resource - * - * \param[in] rsc Resource to search - * \param[in] node Find only actions scheduled on this node - * \param[in] task Action name to search for - * \param[in] require_node If TRUE, NULL node or action node will not match - * - * \return List of actions found (or NULL if none) - * \note If node is not NULL and require_node is FALSE, matching actions - * without a node will be assigned to node. - */ -GList * -pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, - const char *task, bool require_node) -{ - GList *result = NULL; - char *key = pcmk__op_key(rsc->id, task, 0); - - if (require_node) { - result = find_actions_exact(rsc->actions, key, node); - } else { - result = find_actions(rsc->actions, key, node); - } - free(key); - return result; -} - -static void -resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag) -{ - pe_node_t *match = NULL; - - if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never)) - && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) { - /* This string comparision may be fragile, but exclusive resources and - * exclusive nodes should not have the symmetric_default constraint - * applied to them. 
- */ - return; - - } else if (rsc->children) { - GList *gIter = rsc->children; - - for (; gIter != NULL; gIter = gIter->next) { - pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; - - resource_node_score(child_rsc, node, score, tag); - } - } - - pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); - match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); - if (match == NULL) { - match = pe__copy_node(node); - g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); - } - match->weight = pcmk__add_scores(match->weight, score); -} - -void -resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag, - pe_working_set_t * data_set) -{ - if (node != NULL) { - resource_node_score(rsc, node, score, tag); - - } else if (data_set != NULL) { - GList *gIter = data_set->nodes; - - for (; gIter != NULL; gIter = gIter->next) { - pe_node_t *node_iter = (pe_node_t *) gIter->data; - - resource_node_score(rsc, node_iter, score, tag); - } - - } else { - GHashTableIter iter; - pe_node_t *node_iter = NULL; - - g_hash_table_iter_init(&iter, rsc->allowed_nodes); - while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { - resource_node_score(rsc, node_iter, score, tag); - } - } - - if (node == NULL && score == -INFINITY) { - if (rsc->allocated_to) { - crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); - free(rsc->allocated_to); - rsc->allocated_to = NULL; - } - } -} - -#define sort_return(an_int, why) do { \ - free(a_uuid); \ - free(b_uuid); \ - crm_trace("%s (%d) %c %s (%d) : %s", \ - a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ - b_xml_id, b_call_id, why); \ - return an_int; \ - } while(0) - -int -pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, - bool same_node_default) -{ - int a_call_id = -1; - int b_call_id = -1; - - char *a_uuid = NULL; - char *b_uuid = NULL; - - const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID); - const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID); - - const char *a_node = crm_element_value(xml_a, XML_LRM_ATTR_TARGET); - const char *b_node = crm_element_value(xml_b, XML_LRM_ATTR_TARGET); - bool same_node = true; - - /* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via - * 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c). - * - * In case that any of the lrm_rsc_op entries doesn't have on_node - * attribute, we need to explicitly tell whether the two operations are on - * the same node. - */ - if (a_node == NULL || b_node == NULL) { - same_node = same_node_default; - - } else { - same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei); - } - - if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) { - /* We have duplicate lrm_rsc_op entries in the status - * section which is unlikely to be a good thing - * - we can handle it easily enough, but we need to get - * to the bottom of why it's happening. 
- */ - pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); - sort_return(0, "duplicate"); - } - - crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); - crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); - - if (a_call_id == -1 && b_call_id == -1) { - /* both are pending ops so it doesn't matter since - * stops are never pending - */ - sort_return(0, "pending"); - - } else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) { - sort_return(-1, "call id"); - - } else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) { - sort_return(1, "call id"); - - } else if (a_call_id >= 0 && b_call_id >= 0 - && (!same_node || a_call_id == b_call_id)) { - /* - * The op and last_failed_op are the same - * Order on last-rc-change - */ - time_t last_a = -1; - time_t last_b = -1; - - crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); - crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); - - crm_trace("rc-change: %lld vs %lld", - (long long) last_a, (long long) last_b); - if (last_a >= 0 && last_a < last_b) { - sort_return(-1, "rc-change"); - - } else if (last_b >= 0 && last_a > last_b) { - sort_return(1, "rc-change"); - } - sort_return(0, "rc-change"); - - } else { - /* One of the inputs is a pending operation - * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other - */ - - int a_id = -1; - int b_id = -1; - - const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC); - const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC); - - CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); - if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL, - NULL)) { - sort_return(0, "bad magic a"); - } - if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL, - NULL)) { - sort_return(0, "bad magic b"); - } - /* try to determine the relative age of the operation... - * some pending operations (e.g. a start) may have been superseded - * by a subsequent stop - * - * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last - */ - if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) { - /* - * some of the logic in here may be redundant... - * - * if the UUID from the TE doesn't match then one better - * be a pending operation. 
- * pending operations don't survive between elections and joins - * because we query the LRM directly - */ - - if (b_call_id == -1) { - sort_return(-1, "transition + call"); - - } else if (a_call_id == -1) { - sort_return(1, "transition + call"); - } - - } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { - sort_return(-1, "transition"); - - } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { - sort_return(1, "transition"); - } - } - - /* we should never end up here */ - CRM_CHECK(FALSE, sort_return(0, "default")); -} - -gint -sort_op_by_callid(gconstpointer a, gconstpointer b) -{ - const xmlNode *xml_a = a; - const xmlNode *xml_b = b; - - return pe__is_newer_op(xml_a, xml_b, true); -} - -time_t -get_effective_time(pe_working_set_t * data_set) -{ - if(data_set) { - if (data_set->now == NULL) { - crm_trace("Recording a new 'now'"); - data_set->now = crm_time_new(NULL); - } - return crm_time_get_seconds_since_epoch(data_set->now); - } - - crm_trace("Defaulting to 'now'"); - return time(NULL); -} - -gboolean -get_target_role(pe_resource_t * rsc, enum rsc_role_e * role) -{ - enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; - const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); - - CRM_CHECK(role != NULL, return FALSE); - - if (pcmk__str_eq(value, "started", pcmk__str_null_matches | pcmk__str_casei) - || pcmk__str_eq("default", value, pcmk__str_casei)) { - return FALSE; + if (action->fail_role == RSC_ROLE_UNKNOWN) { + if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) { + action->fail_role = RSC_ROLE_UNPROMOTED; + } else { + action->fail_role = RSC_ROLE_STARTED; + } } + pe_rsc_trace(action->rsc, "%s failure results in: %s", + action->uuid, role2text(action->fail_role)); - local_role = text2role(value); - if (local_role == RSC_ROLE_UNKNOWN) { - pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " - "because '%s' is not valid", rsc->id, value); - return FALSE; - - } else if (local_role > RSC_ROLE_STARTED) { - if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { - if (local_role > RSC_ROLE_UNPROMOTED) { - /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ - return FALSE; - } + value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); + if (value) { + unpack_start_delay(value, action->meta); + } else { + long long start_delay = 0; - } else { - pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " - "because '%s' only makes sense for promotable " - "clones", rsc->id, value); - return FALSE; + value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); + if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now, + &start_delay)) { + g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY), + crm_strdup_printf("%lld", start_delay)); } } - *role = local_role; - return TRUE; +#if ENABLE_VERSIONED_ATTRS + unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms, + data_set->now); +#endif } -gboolean -order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order) +/*! 
+ * \brief Create or update an action object + * + * \param[in] rsc Resource that action is for (if any) + * \param[in] key Action key (must be non-NULL) + * \param[in] task Action name (must be non-NULL) + * \param[in] on_node Node that action is on (if any) + * \param[in] optional Whether action should be considered optional + * \param[in] save_action Whether action should be recorded in transition graph + * \param[in] data_set Cluster working set + * + * \return Action object corresponding to arguments + * \note This function takes ownership of (and might free) \p key. If + * \p save_action is true, \p data_set will own the returned action, + * otherwise it is the caller's responsibility to free the return value + * with pe_free_action(). + */ +pe_action_t * +custom_action(pe_resource_t *rsc, char *key, const char *task, + pe_node_t *on_node, gboolean optional, gboolean save_action, + pe_working_set_t *data_set) { - GList *gIter = NULL; - pe_action_wrapper_t *wrapper = NULL; - GList *list = NULL; + pe_action_t *action = NULL; - if (order == pe_order_none) { - return FALSE; + CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL)); + + if (save_action) { + action = find_existing_action(key, rsc, on_node, data_set); } - if (lh_action == NULL || rh_action == NULL) { - return FALSE; + if (action == NULL) { + action = new_action(key, task, rsc, on_node, optional, save_action, + data_set); + } else { + free(key); } - crm_trace("Creating action wrappers for ordering: %s then %s", - lh_action->uuid, rh_action->uuid); + update_action_optional(action, optional); - /* Ensure we never create a dependency on ourselves... it's happened */ - CRM_ASSERT(lh_action != rh_action); + if (rsc != NULL) { + if (action->node != NULL) { + unpack_action_node_attributes(action, data_set); + } - /* Filter dups, otherwise update_action_states() has too much work to do */ - gIter = lh_action->actions_after; - for (; gIter != NULL; gIter = gIter->next) { - pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data; + update_resource_action_runnable(action, save_action, data_set); - if (after->action == rh_action && (after->type & order)) { - return FALSE; + if (save_action) { + update_resource_flags_for_action(rsc, action); } } - wrapper = calloc(1, sizeof(pe_action_wrapper_t)); - wrapper->action = rh_action; - wrapper->type = order; - list = lh_action->actions_after; - list = g_list_prepend(list, wrapper); - lh_action->actions_after = list; - - wrapper = calloc(1, sizeof(pe_action_wrapper_t)); - wrapper->action = lh_action; - wrapper->type = order; - list = rh_action->actions_before; - list = g_list_prepend(list, wrapper); - rh_action->actions_before = list; - return TRUE; + return action; } pe_action_t * get_pseudo_op(const char *name, pe_working_set_t * data_set) { pe_action_t *op = lookup_singleton(data_set, name); if (op == NULL) { op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set); pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); } return op; } -void -destroy_ticket(gpointer data) -{ - pe_ticket_t *ticket = data; - - if (ticket->state) { - g_hash_table_destroy(ticket->state); - } - free(ticket->id); - free(ticket); -} - -pe_ticket_t * -ticket_new(const char *ticket_id, pe_working_set_t * data_set) -{ - pe_ticket_t *ticket = NULL; - - if (pcmk__str_empty(ticket_id)) { - return NULL; - } - - if (data_set->tickets == NULL) { - data_set->tickets = pcmk__strkey_table(free, destroy_ticket); - } - - ticket = g_hash_table_lookup(data_set->tickets, ticket_id); - if 
(ticket == NULL) { - - ticket = calloc(1, sizeof(pe_ticket_t)); - if (ticket == NULL) { - crm_err("Cannot allocate ticket '%s'", ticket_id); - return NULL; - } - - crm_trace("Creaing ticket entry for %s", ticket_id); - - ticket->id = strdup(ticket_id); - ticket->granted = FALSE; - ticket->last_granted = -1; - ticket->standby = FALSE; - ticket->state = pcmk__strkey_table(free, free); - - g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket); - } - - return ticket; -} - -const char *rsc_printable_id(pe_resource_t *rsc) -{ - if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { - return ID(rsc->xml); - } - return rsc->id; -} - -void -pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags) -{ - pe__clear_resource_flags(rsc, flags); - for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { - pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags); - } -} - -void -pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag) -{ - for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { - pe_resource_t *r = (pe_resource_t *) lpc->data; - pe__clear_resource_flags_recursive(r, flag); - } -} - -void -pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags) -{ - pe__set_resource_flags(rsc, flags); - for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { - pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags); - } -} - static GList * find_unfencing_devices(GList *candidates, GList *matches) { for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) { pe_resource_t *candidate = gIter->data; if (candidate->children != NULL) { matches = find_unfencing_devices(candidate->children, matches); } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) { continue; } else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) { matches = g_list_prepend(matches, candidate); } else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta, PCMK_STONITH_PROVIDES), PCMK__VALUE_UNFENCING, pcmk__str_casei)) { matches = g_list_prepend(matches, candidate); } } return matches; } static int node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set) { int member_count = 0; int online_count = 0; int top_priority = 0; int lowest_priority = 0; GList *gIter = NULL; // `priority-fencing-delay` is disabled if (data_set->priority_fencing_delay <= 0) { return 0; } /* No need to request a delay if the fencing target is not a normal cluster * member, for example if it's a remote node or a guest node. */ if (node->details->type != node_member) { return 0; } // No need to request a delay if the fencing target is in our partition if (node->details->online) { return 0; } for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { pe_node_t *n = gIter->data; if (n->details->type != node_member) { continue; } member_count ++; if (n->details->online) { online_count++; } if (member_count == 1 || n->details->priority > top_priority) { top_priority = n->details->priority; } if (member_count == 1 || n->details->priority < lowest_priority) { lowest_priority = n->details->priority; } } // No need to delay if we have more than half of the cluster members if (online_count > member_count / 2) { return 0; } /* All the nodes have equal priority. * Any configured corresponding `pcmk_delay_base/max` will be applied. 
*/ if (lowest_priority == top_priority) { return 0; } if (node->details->priority < top_priority) { return 0; } return data_set->priority_fencing_delay; } pe_action_t * pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t * data_set) { char *op_key = NULL; pe_action_t *stonith_op = NULL; if(op == NULL) { op = data_set->stonith_action; } op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); stonith_op = lookup_singleton(data_set, op_key); if(stonith_op == NULL) { stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); add_hash_param(stonith_op->meta, "stonith_action", op); if (pe__is_guest_or_remote_node(node) && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) { /* Extra work to detect device changes on remotes * * We may do this for all nodes in the future, but for now * the pcmk__check_action_config() based stuff works fine. */ long max = 1024; long digests_all_offset = 0; long digests_secure_offset = 0; char *digests_all = calloc(max, sizeof(char)); char *digests_secure = calloc(max, sizeof(char)); GList *matches = find_unfencing_devices(data_set->resources, NULL); for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) { pe_resource_t *match = gIter->data; const char *agent = g_hash_table_lookup(match->meta, XML_ATTR_TYPE); op_digest_cache_t *data = NULL; data = pe__compare_fencing_digest(match, agent, node, data_set); if(data->rc == RSC_DIGEST_ALL) { optional = FALSE; crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); if (!pcmk__is_daemon && data_set->priv != NULL) { pcmk__output_t *out = data_set->priv; out->info(out, "notice: Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); } } digests_all_offset += snprintf( digests_all+digests_all_offset, max-digests_all_offset, "%s:%s:%s,", match->id, agent, data->digest_all_calc); digests_secure_offset += snprintf( digests_secure+digests_secure_offset, max-digests_secure_offset, "%s:%s:%s,", match->id, agent, data->digest_secure_calc); } g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_ALL), digests_all); g_hash_table_insert(stonith_op->meta, strdup(XML_OP_ATTR_DIGESTS_SECURE), digests_secure); } } else { free(op_key); } if (data_set->priority_fencing_delay > 0 /* It's a suitable case where `priority-fencing-delay` applies. * At least add `priority-fencing-delay` field as an indicator. */ && (priority_delay /* The priority delay needs to be recalculated if this function has * been called by schedule_fencing_and_shutdowns() after node * priority has already been calculated by native_add_running(). */ || g_hash_table_lookup(stonith_op->meta, XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) { /* Add `priority-fencing-delay` to the fencing op even if it's 0 for * the targeting node. So that it takes precedence over any possible * `pcmk_delay_base/max`. 
*/ char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set)); g_hash_table_insert(stonith_op->meta, strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY), delay_s); } if(optional == FALSE && pe_can_fence(data_set, node)) { pe__clear_action_flags(stonith_op, pe_action_optional); pe_action_set_reason(stonith_op, reason, false); } else if(reason && stonith_op->reason == NULL) { stonith_op->reason = strdup(reason); } return stonith_op; } void -trigger_unfencing( - pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set) +pe_free_action(pe_action_t * action) { - if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) { - /* No resources require it */ + if (action == NULL) { return; + } + g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */ + g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */ + if (action->extra) { + g_hash_table_destroy(action->extra); + } + if (action->meta) { + g_hash_table_destroy(action->meta); + } +#if ENABLE_VERSIONED_ATTRS + if (action->rsc) { + pe_free_rsc_action_details(action); + } +#endif + free(action->cancel_task); + free(action->reason); + free(action->task); + free(action->uuid); + free(action->node); + free(action); +} - } else if ((rsc != NULL) - && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) { - /* Wasn't a stonith device */ - return; +int +pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set) +{ + xmlNode *child = NULL; + GHashTable *action_meta = NULL; + const char *timeout_spec = NULL; + int timeout_ms = 0; - } else if(node - && node->details->online - && node->details->unclean == FALSE - && node->details->shutdown == FALSE) { - pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set); + pe_rule_eval_data_t rule_data = { + .node_hash = NULL, + .role = RSC_ROLE_UNKNOWN, + .now = data_set->now, + .match_data = NULL, + .rsc_data = NULL, + .op_data = NULL + }; - if(dependency) { - order_actions(unfence, dependency, pe_order_optional); + for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); + child != NULL; child = crm_next_same_xml(child)) { + if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME), + pcmk__str_casei)) { + timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT); + break; } + } + + if (timeout_spec == NULL && data_set->op_defaults) { + action_meta = pcmk__strkey_table(free, free); + pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, + &rule_data, action_meta, NULL, FALSE, data_set); + timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); + } - } else if(rsc) { - GHashTableIter iter; + // @TODO check meta-attributes (including versioned meta-attributes) + // @TODO maybe use min-interval monitor timeout as default for monitors - g_hash_table_iter_init(&iter, rsc->allowed_nodes); - while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { - if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { - trigger_unfencing(rsc, node, reason, dependency, data_set); - } + timeout_ms = crm_get_msec(timeout_spec); + if (timeout_ms < 0) { + timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); + } + + if (action_meta != NULL) { + g_hash_table_destroy(action_meta); + } + return timeout_ms; +} + +enum action_tasks +get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic) +{ + enum action_tasks task = text2task(name); + + if (rsc == NULL) { + return task; + + } else 
if (allow_non_atomic == FALSE || rsc->variant == pe_native) { + switch (task) { + case stopped_rsc: + case started_rsc: + case action_demoted: + case action_promoted: + crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); + return task - 1; + default: + break; + } + } + return task; +} + +pe_action_t * +find_first_action(GList *input, const char *uuid, const char *task, pe_node_t * on_node) +{ + GList *gIter = NULL; + + CRM_CHECK(uuid || task, return NULL); + + for (gIter = input; gIter != NULL; gIter = gIter->next) { + pe_action_t *action = (pe_action_t *) gIter->data; + + if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) { + continue; + + } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) { + continue; + + } else if (on_node == NULL) { + return action; + + } else if (action->node == NULL) { + continue; + + } else if (on_node->details == action->node->details) { + return action; + } + } + + return NULL; +} + +GList * +find_actions(GList *input, const char *key, const pe_node_t *on_node) +{ + GList *gIter = input; + GList *result = NULL; + + CRM_CHECK(key != NULL, return NULL); + + for (; gIter != NULL; gIter = gIter->next) { + pe_action_t *action = (pe_action_t *) gIter->data; + + if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) { + continue; + + } else if (on_node == NULL) { + crm_trace("Action %s matches (ignoring node)", key); + result = g_list_prepend(result, action); + + } else if (action->node == NULL) { + crm_trace("Action %s matches (unallocated, assigning to %s)", + key, on_node->details->uname); + + action->node = pe__copy_node(on_node); + result = g_list_prepend(result, action); + + } else if (on_node->details == action->node->details) { + crm_trace("Action %s on %s matches", key, on_node->details->uname); + result = g_list_prepend(result, action); } } + + return result; } -gboolean -add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref) +GList * +find_actions_exact(GList *input, const char *key, const pe_node_t *on_node) { - pe_tag_t *tag = NULL; - GList *gIter = NULL; - gboolean is_existing = FALSE; + GList *result = NULL; - CRM_CHECK(tags && tag_name && obj_ref, return FALSE); + CRM_CHECK(key != NULL, return NULL); - tag = g_hash_table_lookup(tags, tag_name); - if (tag == NULL) { - tag = calloc(1, sizeof(pe_tag_t)); - if (tag == NULL) { - return FALSE; - } - tag->id = strdup(tag_name); - tag->refs = NULL; - g_hash_table_insert(tags, strdup(tag_name), tag); + if (on_node == NULL) { + return NULL; } - for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { - const char *existing_ref = (const char *) gIter->data; + for (GList *gIter = input; gIter != NULL; gIter = gIter->next) { + pe_action_t *action = (pe_action_t *) gIter->data; - if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){ - is_existing = TRUE; - break; + if ((action->node != NULL) + && pcmk__str_eq(key, action->uuid, pcmk__str_casei) + && pcmk__str_eq(on_node->details->id, action->node->details->id, + pcmk__str_casei)) { + + crm_trace("Action %s on %s matches", key, on_node->details->uname); + result = g_list_prepend(result, action); } } - if (is_existing == FALSE) { - tag->refs = g_list_append(tag->refs, strdup(obj_ref)); - crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref); - } + return result; +} + +/*! 
+ * \brief Find all actions of given type for a resource + * + * \param[in] rsc Resource to search + * \param[in] node Find only actions scheduled on this node + * \param[in] task Action name to search for + * \param[in] require_node If TRUE, NULL node or action node will not match + * + * \return List of actions found (or NULL if none) + * \note If node is not NULL and require_node is FALSE, matching actions + * without a node will be assigned to node. + */ +GList * +pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, + const char *task, bool require_node) +{ + GList *result = NULL; + char *key = pcmk__op_key(rsc->id, task, 0); - return TRUE; + if (require_node) { + result = find_actions_exact(rsc->actions, key, node); + } else { + result = find_actions(rsc->actions, key, node); + } + free(key); + return result; } /*! * \internal * \brief Create an action reason string based on the action itself * * \param[in] action Action to create reason string for * \param[in] flag Action flag that was cleared * * \return Newly allocated string suitable for use as action reason * \note It is the caller's responsibility to free() the result. */ char * pe__action2reason(pe_action_t *action, enum pe_action_flags flag) { const char *change = NULL; switch (flag) { case pe_action_runnable: case pe_action_migrate_runnable: change = "unrunnable"; break; case pe_action_optional: change = "required"; break; default: // Bug: caller passed unsupported flag CRM_CHECK(change != NULL, change = ""); break; } return crm_strdup_printf("%s%s%s %s", change, (action->rsc == NULL)? "" : " ", (action->rsc == NULL)? "" : action->rsc->id, action->task); } void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite) { if (action->reason != NULL && overwrite) { pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'", action->uuid, action->reason, pcmk__s(reason, "(none)")); } else if (action->reason == NULL) { pe_rsc_trace(action->rsc, "Set %s reason to '%s'", action->uuid, pcmk__s(reason, "(none)")); } else { // crm_assert(action->reason != NULL && !overwrite); return; } pcmk__str_update(&action->reason, reason); } -/*! - * \internal - * \brief Check whether shutdown has been requested for a node - * - * \param[in] node Node to check - * - * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise - * \note This differs from simply using node->details->shutdown in that it can - * be used before that has been determined (and in fact to determine it), - * and it can also be used to distinguish requested shutdown from implicit - * shutdown of remote nodes by virtue of their connection stopping. - */ -bool -pe__shutdown_requested(pe_node_t *node) -{ - const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); - - return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches); -} - -/*! - * \internal - * \brief Update a data set's "recheck by" time - * - * \param[in] recheck Epoch time when recheck should happen - * \param[in,out] data_set Current working set - */ -void -pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set) -{ - if ((recheck > get_effective_time(data_set)) - && ((data_set->recheck_by == 0) - || (data_set->recheck_by > recheck))) { - data_set->recheck_by = recheck; - } -} - -/*! 
- * \internal - * \brief Wrapper for pe_unpack_nvpairs() using a cluster working set - */ -void -pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, - pe_rule_eval_data_t *rule_data, GHashTable *hash, - const char *always_first, gboolean overwrite, - pe_working_set_t *data_set) -{ - crm_time_t *next_change = crm_time_new_undefined(); - - pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash, - always_first, overwrite, next_change); - if (crm_time_is_defined(next_change)) { - time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); - - pe__update_recheck_time(recheck, data_set); - } - crm_time_free(next_change); -} - -bool -pe__resource_is_disabled(pe_resource_t *rsc) -{ - const char *target_role = NULL; - - CRM_CHECK(rsc != NULL, return false); - target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); - if (target_role) { - enum rsc_role_e target_role_e = text2role(target_role); - - if ((target_role_e == RSC_ROLE_STOPPED) - || ((target_role_e == RSC_ROLE_UNPROMOTED) - && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) { - return true; - } - } - return false; -} - /*! * \internal * \brief Create an action to clear a resource's history from CIB * * \param[in] rsc Resource to clear * \param[in] node Node to clear history on * * \return New action to clear resource history */ pe_action_t * pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set) { char *key = NULL; CRM_ASSERT(rsc && node); key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0); return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE, data_set); } -bool -pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list) +#define sort_return(an_int, why) do { \ + free(a_uuid); \ + free(b_uuid); \ + crm_trace("%s (%d) %c %s (%d) : %s", \ + a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ + b_xml_id, b_call_id, why); \ + return an_int; \ + } while(0) + +int +pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, + bool same_node_default) { - for (GList *ele = rsc->running_on; ele; ele = ele->next) { - pe_node_t *node = (pe_node_t *) ele->data; - if (pcmk__str_in_list(node->details->uname, node_list, - pcmk__str_star_matches|pcmk__str_casei)) { - return true; - } - } + int a_call_id = -1; + int b_call_id = -1; - return false; -} + char *a_uuid = NULL; + char *b_uuid = NULL; -bool -pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node) -{ - return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node)); -} + const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID); + const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID); -GList * -pe__filter_rsc_list(GList *rscs, GList *filter) -{ - GList *retval = NULL; + const char *a_node = crm_element_value(xml_a, XML_LRM_ATTR_TARGET); + const char *b_node = crm_element_value(xml_b, XML_LRM_ATTR_TARGET); + bool same_node = true; + + /* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via + * 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c). + * + * In case that any of the lrm_rsc_op entries doesn't have on_node + * attribute, we need to explicitly tell whether the two operations are on + * the same node. 
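 *
 * As an illustrative example (not from the source): when both entries are
 * known to be on the same node, completed operations are ordered by call ID,
 * so a start recorded with call ID 12 sorts before a monitor recorded with
 * call ID 15, because the executor assigns call IDs in increasing order on
 * each node.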
+ */ + if (a_node == NULL || b_node == NULL) { + same_node = same_node_default; - for (GList *gIter = rscs; gIter; gIter = gIter->next) { - pe_resource_t *rsc = (pe_resource_t *) gIter->data; + } else { + same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei); + } - /* I think the second condition is safe here for all callers of this - * function. If not, it needs to move into pe__node_text. + if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) { + /* We have duplicate lrm_rsc_op entries in the status + * section which is unlikely to be a good thing + * - we can handle it easily enough, but we need to get + * to the bottom of why it's happening. */ - if (pcmk__str_in_list(rsc_printable_id(rsc), filter, pcmk__str_star_matches) || - (rsc->parent && pcmk__str_in_list(rsc_printable_id(rsc->parent), filter, pcmk__str_star_matches))) { - retval = g_list_prepend(retval, rsc); - } + pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); + sort_return(0, "duplicate"); } - return retval; -} + crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); + crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); -GList * -pe__build_node_name_list(pe_working_set_t *data_set, const char *s) { - GList *nodes = NULL; + if (a_call_id == -1 && b_call_id == -1) { + /* both are pending ops so it doesn't matter since + * stops are never pending + */ + sort_return(0, "pending"); - if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { - /* Nothing was given so return a list of all node names. Or, '*' was - * given. This would normally fall into the pe__unames_with_tag branch - * where it will return an empty list. Catch it here instead. + } else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) { + sort_return(-1, "call id"); + + } else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) { + sort_return(1, "call id"); + + } else if (a_call_id >= 0 && b_call_id >= 0 + && (!same_node || a_call_id == b_call_id)) { + /* + * The op and last_failed_op are the same + * Order on last-rc-change */ - nodes = g_list_prepend(nodes, strdup("*")); - } else { - pe_node_t *node = pe_find_node(data_set->nodes, s); + time_t last_a = -1; + time_t last_b = -1; - if (node) { - /* The given string was a valid uname for a node. Return a - * singleton list containing just that uname. - */ - nodes = g_list_prepend(nodes, strdup(s)); - } else { - /* The given string was not a valid uname. It's either a tag or - * it's a typo or something. In the first case, we'll return a - * list of all the unames of the nodes with the given tag. In the - * second case, we'll return a NULL pointer and nothing will - * get displayed. 
- */ - nodes = pe__unames_with_tag(data_set, s); - } - } + crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); + crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); - return nodes; -} + crm_trace("rc-change: %lld vs %lld", + (long long) last_a, (long long) last_b); + if (last_a >= 0 && last_a < last_b) { + sort_return(-1, "rc-change"); -GList * -pe__build_rsc_list(pe_working_set_t *data_set, const char *s) { - GList *resources = NULL; + } else if (last_b >= 0 && last_a > last_b) { + sort_return(1, "rc-change"); + } + sort_return(0, "rc-change"); - if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { - resources = g_list_prepend(resources, strdup("*")); } else { - pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s, - pe_find_renamed|pe_find_any); - - if (rsc) { - /* A colon in the name we were given means we're being asked to filter - * on a specific instance of a cloned resource. Put that exact string - * into the filter list. Otherwise, use the printable ID of whatever - * resource was found that matches what was asked for. + /* One of the inputs is a pending operation + * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other + */ + + int a_id = -1; + int b_id = -1; + + const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC); + const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC); + + CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); + if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL, + NULL)) { + sort_return(0, "bad magic a"); + } + if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL, + NULL)) { + sort_return(0, "bad magic b"); + } + /* try to determine the relative age of the operation... + * some pending operations (e.g. a start) may have been superseded + * by a subsequent stop + * + * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last + */ + if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) { + /* + * some of the logic in here may be redundant... + * + * if the UUID from the TE doesn't match then one better + * be a pending operation. + * pending operations don't survive between elections and joins + * because we query the LRM directly */ - if (strstr(s, ":") != NULL) { - resources = g_list_prepend(resources, strdup(rsc->id)); - } else { - resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc))); + + if (b_call_id == -1) { + sort_return(-1, "transition + call"); + + } else if (a_call_id == -1) { + sort_return(1, "transition + call"); } - } else { - /* The given string was not a valid resource name. It's either - * a tag or it's a typo or something. See build_uname_list for - * more detail. 
- */ - resources = pe__rscs_with_tag(data_set, s); + + } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { + sort_return(-1, "transition"); + + } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { + sort_return(1, "transition"); } } - return resources; + /* we should never end up here */ + CRM_CHECK(FALSE, sort_return(0, "default")); } -xmlNode * -pe__failed_probe_for_rsc(pe_resource_t *rsc, const char *name) +gint +sort_op_by_callid(gconstpointer a, gconstpointer b) { - pe_resource_t *parent = uber_parent(rsc); - const char *rsc_id = rsc->id; - - if (rsc->variant == pe_clone) { - rsc_id = pe__clone_child_id(rsc); - } else if (parent->variant == pe_clone) { - rsc_id = pe__clone_child_id(parent); - } - - for (xmlNode *xml_op = pcmk__xml_first_child(rsc->cluster->failed); xml_op != NULL; - xml_op = pcmk__xml_next(xml_op)) { - const char *value = NULL; - char *op_id = NULL; - - /* This resource operation is not a failed probe. */ - if (!pcmk_xe_mask_probe_failure(xml_op)) { - continue; - } + const xmlNode *xml_a = a; + const xmlNode *xml_b = b; - /* This resource operation was not run on the given node. Note that if name is - * NULL, this will always succeed. - */ - value = crm_element_value(xml_op, XML_LRM_ATTR_TARGET); - if (value == NULL || !pcmk__str_eq(value, name, pcmk__str_casei|pcmk__str_null_matches)) { - continue; - } + return pe__is_newer_op(xml_a, xml_b, true); +} - /* This resource operation has no operation_key. */ - value = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); - if (!parse_op_key(value ? value : ID(xml_op), &op_id, NULL, NULL)) { - continue; - } +/*! + * \internal + * \brief Create a new pseudo-action for a resource + * + * \param[in] rsc Resource to create action for + * \param[in] task Action name + * \param[in] optional Whether action should be considered optional + * \param[in] runnable Whether action should be considered runnable + * + * \return New action object corresponding to arguments + */ +pe_action_t * +pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional, + bool runnable) +{ + pe_action_t *action = NULL; - /* This resource operation's ID does not match the rsc_id we are looking for. */ - if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) { - free(op_id); - continue; - } + CRM_ASSERT((rsc != NULL) && (task != NULL)); - free(op_id); - return xml_op; + action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL, + optional, TRUE, rsc->cluster); + pe__set_action_flags(action, pe_action_pseudo); + if (runnable) { + pe__set_action_flags(action, pe_action_runnable); } - - return NULL; + return action; } diff --git a/lib/pacemaker/pcmk_sched_notif.c b/lib/pengine/pe_notif.c similarity index 98% rename from lib/pacemaker/pcmk_sched_notif.c rename to lib/pengine/pe_notif.c index a7d5af8cb2..a2693c5aaf 100644 --- a/lib/pacemaker/pcmk_sched_notif.c +++ b/lib/pengine/pe_notif.c @@ -1,984 +1,983 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include -#include "libpacemaker_private.h" typedef struct notify_entry_s { pe_resource_t *rsc; pe_node_t *node; } notify_entry_t; /*!
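 * \par Example
 * An illustrative sketch (entry contents are made up): given one entry for
 * instance "clone:1" on node "node2" and another for "clone:0" on "node1",
 * \code
 *     list = g_list_sort(list, compare_notify_entries);
 * \endcode
 * places the "clone:0" entry first, because resource IDs are compared before
 * node names.
 *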
* \internal * \brief Compare two notification entries * * Compare two notification entries, where the one with the alphabetically first * resource name (or if equal, node name) sorts as first, with NULL sorting as * less than non-NULL. * * \param[in] a First notification entry to compare * \param[in] b Second notification entry to compare * * \return -1 if \p a sorts before \p b, 0 if they are equal, otherwise 1 */ static gint compare_notify_entries(gconstpointer a, gconstpointer b) { int tmp; const notify_entry_t *entry_a = a; const notify_entry_t *entry_b = b; // NULL a or b is not actually possible if ((entry_a == NULL) && (entry_b == NULL)) { return 0; } if (entry_a == NULL) { return 1; } if (entry_b == NULL) { return -1; } // NULL resources sort first if ((entry_a->rsc == NULL) && (entry_b->rsc == NULL)) { return 0; } if (entry_a->rsc == NULL) { return 1; } if (entry_b->rsc == NULL) { return -1; } // Compare resource names tmp = strcmp(entry_a->rsc->id, entry_b->rsc->id); if (tmp != 0) { return tmp; } // Otherwise NULL nodes sort first if ((entry_a->node == NULL) && (entry_b->node == NULL)) { return 0; } if (entry_a->node == NULL) { return 1; } if (entry_b->node == NULL) { return -1; } // Finally, compare node names return strcmp(entry_a->node->details->id, entry_b->node->details->id); } /*! * \internal * \brief Duplicate a notification entry * * \param[in] entry Entry to duplicate * * \return Newly allocated duplicate of \p entry * \note It is the caller's responsibility to free the return value. */ static notify_entry_t * dup_notify_entry(notify_entry_t *entry) { notify_entry_t *dup = calloc(1, sizeof(notify_entry_t)); CRM_ASSERT(dup != NULL); dup->rsc = entry->rsc; dup->node = entry->node; return dup; } /*! * \internal * \brief Given a list of nodes, create strings with node names * * \param[in] list List of nodes (as pe_node_t *) * \param[out] all_node_names If not NULL, will be set to space-separated list * of the names of all nodes in \p list * \param[out] host_node_names Same as \p all_node_names, except active * guest nodes will list the name of their host * * \note The caller is responsible for freeing the output arguments. */ static void get_node_names(GList *list, char **all_node_names, char **host_node_names) { size_t all_len = 0; size_t host_len = 0; if (all_node_names != NULL) { *all_node_names = NULL; } if (host_node_names != NULL) { *host_node_names = NULL; } for (GList *iter = list; iter != NULL; iter = iter->next) { pe_node_t *node = (pe_node_t *) iter->data; if (node->details->uname == NULL) { continue; } // Always add to list of all node names if (all_node_names != NULL) { pcmk__add_word(all_node_names, &all_len, node->details->uname); } // Add to host node name list if appropriate if (host_node_names != NULL) { if (pe__is_guest_node(node) && (node->details->remote_rsc->container->running_on != NULL)) { node = pe__current_node(node->details->remote_rsc->container); if (node->details->uname == NULL) { continue; } } pcmk__add_word(host_node_names, &host_len, node->details->uname); } } if ((all_node_names != NULL) && (*all_node_names == NULL)) { *all_node_names = strdup(" "); CRM_ASSERT(*all_node_names != NULL); } if ((host_node_names != NULL) && (*host_node_names == NULL)) { *host_node_names = strdup(" "); CRM_ASSERT(*host_node_names != NULL); } } /*! 
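 * \par Usage sketch
 * An illustrative example (values are made up); the caller must free the
 * returned strings:
 * \code
 *     char *rscs = NULL;
 *     char *nodes = NULL;
 *
 *     n_data->start = notify_entries_to_strings(n_data->start, &rscs, &nodes);
 *     // rscs might now be "clone:0 clone:1" and nodes "node1 node2"
 * \endcode
 *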
* \internal * \brief Create strings of instance and node names from notification entries * * \param[in,out] list List of notification entries (will be sorted here) * \param[out] rsc_names If not NULL, will be set to space-separated list * of clone instances from \p list * \param[out] node_names If not NULL, will be set to space-separated list * of node names from \p list * * \return (Possibly new) head of sorted \p list * \note The caller is responsible for freeing the output argument values. */ static GList * notify_entries_to_strings(GList *list, char **rsc_names, char **node_names) { const char *last_rsc_id = NULL; size_t rsc_names_len = 0; size_t node_names_len = 0; // Initialize output lists to NULL if (rsc_names != NULL) { *rsc_names = NULL; } if (node_names != NULL) { *node_names = NULL; } // Sort input list for user-friendliness (and ease of filtering duplicates) list = g_list_sort(list, compare_notify_entries); for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { notify_entry_t *entry = (notify_entry_t *) gIter->data; // Entry must have a resource (with ID) CRM_LOG_ASSERT((entry != NULL) && (entry->rsc != NULL) && (entry->rsc->id != NULL)); if ((entry == NULL) || (entry->rsc == NULL) || (entry->rsc->id == NULL)) { continue; } // Entry must have a node unless listing inactive resources CRM_LOG_ASSERT((node_names == NULL) || (entry->node != NULL)); if ((node_names != NULL) && (entry->node == NULL)) { continue; } // Don't add duplicates of a particular clone instance if (pcmk__str_eq(entry->rsc->id, last_rsc_id, pcmk__str_none)) { continue; } last_rsc_id = entry->rsc->id; if (rsc_names != NULL) { pcmk__add_word(rsc_names, &rsc_names_len, entry->rsc->id); } if ((node_names != NULL) && (entry->node->details->uname != NULL)) { pcmk__add_word(node_names, &node_names_len, entry->node->details->uname); } } // If there are no entries, return "empty" lists if ((rsc_names != NULL) && (*rsc_names == NULL)) { *rsc_names = strdup(" "); CRM_ASSERT(*rsc_names != NULL); } if ((node_names != NULL) && (*node_names == NULL)) { *node_names = strdup(" "); CRM_ASSERT(*node_names != NULL); } return list; } /*! * \internal * \brief Copy a meta-attribute into a notify action * * \param[in] key Name of meta-attribute to copy * \param[in] value Value of meta-attribute to copy * \param[in] user_data Notify action to copy into */ static void copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data) { pe_action_t *notify = (pe_action_t *) user_data; /* Any existing meta-attributes (for example, the action timeout) are for * the notify action itself, so don't override those. */ if (g_hash_table_lookup(notify->meta, (const char *) key) != NULL) { return; } g_hash_table_insert(notify->meta, strdup((const char *) key), strdup((const char *) value)); } static void add_notify_data_to_action_meta(notify_data_t *n_data, pe_action_t *action) { for (GSList *item = n_data->keys; item; item = item->next) { pcmk_nvpair_t *nvpair = item->data; add_hash_param(action->meta, nvpair->name, nvpair->value); } } /*! 
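 * \par Usage sketch
 * An illustrative example ("clone" and "start" are assumed to be a clone
 * resource and one of its start actions):
 * \code
 *     pe_action_t *pre = new_notify_pseudo_action(clone, start, RSC_NOTIFY,
 *                                                 "pre");
 *     // pre->meta now maps "notify_key_type" to "pre" and
 *     // "notify_key_operation" to start->task
 * \endcode
 *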
* \internal * \brief Create a new notify pseudo-action for a clone resource * * \param[in] rsc Clone resource that notification is for * \param[in] action Action to use in notify action key * \param[in] notif_action RSC_NOTIFY or RSC_NOTIFIED * \param[in] notif_type "pre", "post", "confirmed-pre", or "confirmed-post" * * \return Newly created notify pseudo-action */ static pe_action_t * new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action, const char *notif_action, const char *notif_type) { pe_action_t *notify = NULL; notify = custom_action(rsc, pcmk__notify_key(rsc->id, notif_type, action->task), notif_action, NULL, pcmk_is_set(action->flags, pe_action_optional), TRUE, rsc->cluster); pe__set_action_flags(notify, pe_action_pseudo); add_hash_param(notify->meta, "notify_key_type", notif_type); add_hash_param(notify->meta, "notify_key_operation", action->task); return notify; } /*! * \internal * \brief Create a new notify action for a clone instance * * \param[in] rsc Clone instance that notification is for * \param[in] node Node that notification is for * \param[in] op Action that notification is for * \param[in] notify_done Parent pseudo-action for notifications complete * \param[in] n_data Notification values to add to action meta-data * * \return Newly created notify action */ static pe_action_t * new_notify_action(pe_resource_t *rsc, pe_node_t *node, pe_action_t *op, pe_action_t *notify_done, notify_data_t *n_data) { char *key = NULL; pe_action_t *notify_action = NULL; const char *value = NULL; const char *task = NULL; const char *skip_reason = NULL; CRM_CHECK((rsc != NULL) && (node != NULL), return NULL); // Ensure we have all the info we need if (op == NULL) { skip_reason = "no action"; } else if (notify_done == NULL) { skip_reason = "no parent notification"; } else if (!node->details->online) { skip_reason = "node offline"; } else if (!pcmk_is_set(op->flags, pe_action_runnable)) { skip_reason = "original action not runnable"; } if (skip_reason != NULL) { pe_rsc_trace(rsc, "Skipping notify action for %s on %s: %s", rsc->id, node->details->uname, skip_reason); return NULL; } value = g_hash_table_lookup(op->meta, "notify_type"); // "pre" or "post" task = g_hash_table_lookup(op->meta, "notify_operation"); // original action pe_rsc_trace(rsc, "Creating notify action for %s on %s (%s-%s)", rsc->id, node->details->uname, value, task); // Create the notify action key = pcmk__notify_key(rsc->id, value, task); notify_action = custom_action(rsc, key, op->task, node, pcmk_is_set(op->flags, pe_action_optional), TRUE, rsc->cluster); // Add meta-data to notify action g_hash_table_foreach(op->meta, copy_meta_to_notify, notify_action); add_notify_data_to_action_meta(n_data, notify_action); // Order notify after original action and before parent notification order_actions(op, notify_action, pe_order_optional); order_actions(notify_action, notify_done, pe_order_optional); return notify_action; } /*! 
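 * \par Usage sketch
 * An illustrative example ("instance", "node", and "n_data" are placeholders
 * for a clone instance, the relevant node, and its notification data):
 * \code
 *     new_post_notify_action(instance, node, n_data);
 *     // The instance's "post-" notification is created for node, and any
 *     // recurring monitor of the instance is ordered after n_data->post_done
 * \endcode
 *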
* \internal * \brief Create a new "post-" notify action for a clone instance * * \param[in] rsc Clone instance that notification is for * \param[in] node Node that notification is for * \param[in] n_data Notification values to add to action meta-data */ static void new_post_notify_action(pe_resource_t *rsc, pe_node_t *node, notify_data_t *n_data) { pe_action_t *notify = NULL; // Create the "post-" notify action for specified instance notify = new_notify_action(rsc, node, n_data->post, n_data->post_done, n_data); if (notify != NULL) { notify->priority = INFINITY; } // Order recurring monitors after all "post-" notifications complete if (n_data->post_done == NULL) { return; } for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) { pe_action_t *mon = (pe_action_t *) iter->data; const char *interval_ms_s = NULL; interval_ms_s = g_hash_table_lookup(mon->meta, XML_LRM_ATTR_INTERVAL_MS); if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches) || pcmk__str_eq(mon->task, RSC_CANCEL, pcmk__str_none)) { continue; // Not a recurring monitor } order_actions(n_data->post_done, mon, pe_order_optional); } } /*! * \internal * \brief Create and order notification pseudo-actions for a clone action * * In addition to the actual notify actions needed for each clone instance, * clone notifications also require pseudo-actions to provide ordering points * in the notification process. This creates the notification data, along with * appropriate pseudo-actions and their orderings. * * For example, the ordering sequence for starting a clone is: * * "pre-" notify pseudo-action for clone * -> "pre-" notify actions for each clone instance * -> "pre-" notifications complete pseudo-action for clone * -> start actions for each clone instance * -> "started" pseudo-action for clone * -> "post-" notify pseudo-action for clone * -> "post-" notify actions for each clone instance * -> "post-" notifications complete pseudo-action for clone * * \param[in] rsc Clone that notifications are for * \param[in] task Name of action that notifications are for * \param[in] action If not NULL, create a "pre-" pseudo-action ordered * before a "pre-" complete pseudo-action, ordered before * this action * \param[in] complete If not NULL, create a "post-" pseudo-action ordered * after this action, and a "post-" complete pseudo-action * ordered after that * * \return Newly created notification data */ notify_data_t * -pcmk__clone_notif_pseudo_ops(pe_resource_t *rsc, const char *task, - pe_action_t *action, pe_action_t *complete) +pe__clone_notif_pseudo_ops(pe_resource_t *rsc, const char *task, + pe_action_t *action, pe_action_t *complete) { notify_data_t *n_data = NULL; if (!pcmk_is_set(rsc->flags, pe_rsc_notify)) { return NULL; } n_data = calloc(1, sizeof(notify_data_t)); CRM_ASSERT(n_data != NULL); n_data->action = task; if (action != NULL) { // Need "pre-" pseudo-actions // Create "pre-" notify pseudo-action for clone n_data->pre = new_notify_pseudo_action(rsc, action, RSC_NOTIFY, "pre"); pe__set_action_flags(n_data->pre, pe_action_runnable); add_hash_param(n_data->pre->meta, "notify_type", "pre"); add_hash_param(n_data->pre->meta, "notify_operation", n_data->action); // Create "pre-" notifications complete pseudo-action for clone n_data->pre_done = new_notify_pseudo_action(rsc, action, RSC_NOTIFIED, "confirmed-pre"); pe__set_action_flags(n_data->pre_done, pe_action_runnable); add_hash_param(n_data->pre_done->meta, "notify_type", "pre"); add_hash_param(n_data->pre_done->meta, "notify_operation", n_data->action); // Order 
"pre-" -> "pre-" complete -> original action order_actions(n_data->pre, n_data->pre_done, pe_order_optional); order_actions(n_data->pre_done, action, pe_order_optional); } if (complete != NULL) { // Need "post-" pseudo-actions // Create "post-" notify pseudo-action for clone n_data->post = new_notify_pseudo_action(rsc, complete, RSC_NOTIFY, "post"); n_data->post->priority = INFINITY; if (pcmk_is_set(complete->flags, pe_action_runnable)) { pe__set_action_flags(n_data->post, pe_action_runnable); } else { pe__clear_action_flags(n_data->post, pe_action_runnable); } add_hash_param(n_data->post->meta, "notify_type", "post"); add_hash_param(n_data->post->meta, "notify_operation", n_data->action); // Create "post-" notifications complete pseudo-action for clone n_data->post_done = new_notify_pseudo_action(rsc, complete, RSC_NOTIFIED, "confirmed-post"); n_data->post_done->priority = INFINITY; if (pcmk_is_set(complete->flags, pe_action_runnable)) { pe__set_action_flags(n_data->post_done, pe_action_runnable); } else { pe__clear_action_flags(n_data->post_done, pe_action_runnable); } add_hash_param(n_data->post_done->meta, "notify_type", "post"); add_hash_param(n_data->post_done->meta, "notify_operation", n_data->action); // Order original action complete -> "post-" -> "post-" complete order_actions(complete, n_data->post, pe_order_implies_then); order_actions(n_data->post, n_data->post_done, pe_order_implies_then); } // If we created both, order "pre-" complete -> "post-" if ((action != NULL) && (complete != NULL)) { order_actions(n_data->pre_done, n_data->post, pe_order_optional); } return n_data; } /*! * \internal * \brief Create a new notification entry * * \param[in] rsc Resource for notification * \param[in] node Node for notification * * \return Newly allocated notification entry * \note The caller is responsible for freeing the return value. */ static notify_entry_t * new_notify_entry(pe_resource_t *rsc, pe_node_t *node) { notify_entry_t *entry = calloc(1, sizeof(notify_entry_t)); CRM_ASSERT(entry != NULL); entry->rsc = rsc; entry->node = node; return entry; } /*! 
* \internal * \brief Add notification data for resource state and optionally actions * * \param[in] rsc Clone or clone instance being notified * \param[in] activity Whether to add notification entries for actions * \param[in] n_data Notification data for clone */ static void collect_resource_data(pe_resource_t *rsc, bool activity, notify_data_t *n_data) { GList *iter = NULL; notify_entry_t *entry = NULL; pe_node_t *node = NULL; if (n_data->allowed_nodes == NULL) { n_data->allowed_nodes = rsc->allowed_nodes; } // If this is a clone, call recursively for each instance if (rsc->children != NULL) { for (iter = rsc->children; iter != NULL; iter = iter->next) { pe_resource_t *child = (pe_resource_t *) iter->data; collect_resource_data(child, activity, n_data); } return; } // This is a notification for a single clone instance if (rsc->running_on != NULL) { node = rsc->running_on->data; // First is sufficient } entry = new_notify_entry(rsc, node); // Add notification indicating the resource state switch (rsc->role) { case RSC_ROLE_STOPPED: n_data->inactive = g_list_prepend(n_data->inactive, entry); break; case RSC_ROLE_STARTED: n_data->active = g_list_prepend(n_data->active, entry); break; case RSC_ROLE_UNPROMOTED: n_data->unpromoted = g_list_prepend(n_data->unpromoted, entry); n_data->active = g_list_prepend(n_data->active, dup_notify_entry(entry)); break; case RSC_ROLE_PROMOTED: n_data->promoted = g_list_prepend(n_data->promoted, entry); n_data->active = g_list_prepend(n_data->active, dup_notify_entry(entry)); break; default: crm_err("Resource %s role on %s (%s) is not supported for " "notifications (bug?)", rsc->id, ((node == NULL)? "no node" : node->details->uname), role2text(rsc->role)); free(entry); break; } if (!activity) { return; } // Add notification entries for each of the resource's actions for (iter = rsc->actions; iter != NULL; iter = iter->next) { pe_action_t *op = (pe_action_t *) iter->data; if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) { enum action_tasks task = text2task(op->task); if ((task == stop_rsc) && op->node->details->unclean) { // Create anyway (additional noise if node can't be fenced) } else if (!pcmk_is_set(op->flags, pe_action_runnable)) { continue; } entry = new_notify_entry(rsc, op->node); switch (task) { case start_rsc: n_data->start = g_list_prepend(n_data->start, entry); break; case stop_rsc: n_data->stop = g_list_prepend(n_data->stop, entry); break; case action_promote: n_data->promote = g_list_prepend(n_data->promote, entry); break; case action_demote: n_data->demote = g_list_prepend(n_data->demote, entry); break; default: free(entry); break; } } } } #define add_notify_env(n_data, key, value) do { \ n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, value); \ } while (0) #define add_notify_env_free(n_data, key, value) do { \ n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, value); \ free(value); value = NULL; \ } while (0) /*! 
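 * \par Example
 * Illustrative values only (a stopping two-instance clone on node1/node2):
 * \code
 *     notify_stop_resource="clone:0 clone:1"
 *     notify_stop_uname="node1 node2"
 *     notify_active_resource="clone:0 clone:1"
 *     notify_available_uname="node1 node2 node3"
 * \endcode
 * Resource agents later see these values as OCF_RESKEY_CRM_meta_notify_*
 * environment variables.
 *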
* \internal * \brief Create notification name/value pairs from structured data * * \param[in] rsc Resource that notification is for * \param[in,out] n_data Notification data */ static void add_notif_keys(pe_resource_t *rsc, notify_data_t *n_data) { bool required = false; // Whether to make notify actions required char *rsc_list = NULL; char *node_list = NULL; char *metal_list = NULL; const char *source = NULL; GList *nodes = NULL; n_data->stop = notify_entries_to_strings(n_data->stop, &rsc_list, &node_list); if (!pcmk__str_eq(" ", rsc_list, pcmk__str_null_matches) && pcmk__str_eq(n_data->action, RSC_STOP, pcmk__str_casei)) { required = true; } add_notify_env_free(n_data, "notify_stop_resource", rsc_list); add_notify_env_free(n_data, "notify_stop_uname", node_list); if ((n_data->start != NULL) && pcmk__str_eq(n_data->action, RSC_START, pcmk__str_none)) { required = true; } n_data->start = notify_entries_to_strings(n_data->start, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_start_resource", rsc_list); add_notify_env_free(n_data, "notify_start_uname", node_list); if ((n_data->demote != NULL) && pcmk__str_eq(n_data->action, RSC_DEMOTE, pcmk__str_none)) { required = true; } n_data->demote = notify_entries_to_strings(n_data->demote, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_demote_resource", rsc_list); add_notify_env_free(n_data, "notify_demote_uname", node_list); if ((n_data->promote != NULL) && pcmk__str_eq(n_data->action, RSC_PROMOTE, pcmk__str_none)) { required = true; } n_data->promote = notify_entries_to_strings(n_data->promote, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_promote_resource", rsc_list); add_notify_env_free(n_data, "notify_promote_uname", node_list); n_data->active = notify_entries_to_strings(n_data->active, &rsc_list, &node_list); add_notify_env_free(n_data, "notify_active_resource", rsc_list); add_notify_env_free(n_data, "notify_active_uname", node_list); n_data->unpromoted = notify_entries_to_strings(n_data->unpromoted, &rsc_list, &node_list); add_notify_env(n_data, "notify_unpromoted_resource", rsc_list); add_notify_env(n_data, "notify_unpromoted_uname", node_list); // Deprecated: kept for backward compatibility with older resource agents add_notify_env_free(n_data, "notify_slave_resource", rsc_list); add_notify_env_free(n_data, "notify_slave_uname", node_list); n_data->promoted = notify_entries_to_strings(n_data->promoted, &rsc_list, &node_list); add_notify_env(n_data, "notify_promoted_resource", rsc_list); add_notify_env(n_data, "notify_promoted_uname", node_list); // Deprecated: kept for backward compatibility with older resource agents add_notify_env_free(n_data, "notify_master_resource", rsc_list); add_notify_env_free(n_data, "notify_master_uname", node_list); n_data->inactive = notify_entries_to_strings(n_data->inactive, &rsc_list, NULL); add_notify_env_free(n_data, "notify_inactive_resource", rsc_list); nodes = g_hash_table_get_values(n_data->allowed_nodes); if (!pcmk__is_daemon) { /* For display purposes, sort the node list, for consistent * regression test output (while avoiding the performance hit * for the live cluster). 
*/ nodes = g_list_sort(nodes, sort_node_uname); } get_node_names(nodes, &node_list, NULL); add_notify_env_free(n_data, "notify_available_uname", node_list); g_list_free(nodes); source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET); if (pcmk__str_eq("host", source, pcmk__str_none)) { get_node_names(rsc->cluster->nodes, &node_list, &metal_list); add_notify_env_free(n_data, "notify_all_hosts", metal_list); } else { get_node_names(rsc->cluster->nodes, &node_list, NULL); } add_notify_env_free(n_data, "notify_all_uname", node_list); if (required && (n_data->pre != NULL)) { pe__clear_action_flags(n_data->pre, pe_action_optional); pe__clear_action_flags(n_data->pre_done, pe_action_optional); } if (required && (n_data->post != NULL)) { pe__clear_action_flags(n_data->post, pe_action_optional); pe__clear_action_flags(n_data->post_done, pe_action_optional); } } /* * \internal * \brief Find any remote connection start relevant to an action * * \param[in] action Action to check * * \return If action is behind a remote connection, connection's start */ static pe_action_t * find_remote_start(pe_action_t *action) { if ((action != NULL) && (action->node != NULL)) { pe_resource_t *remote_rsc = action->node->details->remote_rsc; if (remote_rsc != NULL) { return find_first_action(remote_rsc->actions, NULL, RSC_START, NULL); } } return NULL; } /*! * \internal * \brief Create notify actions, and add notify data to original actions * * \param[in] rsc Clone or clone instance that notification is for * \param[in] n_data Clone notification data for some action */ static void create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data) { GList *iter = NULL; pe_action_t *stop = NULL; pe_action_t *start = NULL; enum action_tasks task = text2task(n_data->action); // If this is a clone, call recursively for each instance if (rsc->children != NULL) { g_list_foreach(rsc->children, (GFunc) create_notify_actions, n_data); return; } // Add notification meta-attributes to original actions for (iter = rsc->actions; iter != NULL; iter = iter->next) { pe_action_t *op = (pe_action_t *) iter->data; if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) { switch (text2task(op->task)) { case start_rsc: case stop_rsc: case action_promote: case action_demote: add_notify_data_to_action_meta(n_data, op); break; default: break; } } } // Skip notify action itself if original action was not needed switch (task) { case start_rsc: if (n_data->start == NULL) { pe_rsc_trace(rsc, "No notify action needed for %s %s", rsc->id, n_data->action); return; } break; case action_promote: if (n_data->promote == NULL) { pe_rsc_trace(rsc, "No notify action needed for %s %s", rsc->id, n_data->action); return; } break; case action_demote: if (n_data->demote == NULL) { pe_rsc_trace(rsc, "No notify action needed for %s %s", rsc->id, n_data->action); return; } break; default: // We cannot do same for stop because it might be implied by fencing break; } pe_rsc_trace(rsc, "Creating notify actions for %s %s", rsc->id, n_data->action); // Create notify actions for stop or demote if ((rsc->role != RSC_ROLE_STOPPED) && ((task == stop_rsc) || (task == action_demote))) { stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL); for (iter = rsc->running_on; iter != NULL; iter = iter->next) { pe_node_t *current_node = (pe_node_t *) iter->data; /* If a stop is a pseudo-action implied by fencing, don't try to * notify the node getting fenced. 
*/ if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo) && (current_node->details->unclean || current_node->details->remote_requires_reset)) { continue; } new_notify_action(rsc, current_node, n_data->pre, n_data->pre_done, n_data); if ((task == action_demote) || (stop == NULL) || pcmk_is_set(stop->flags, pe_action_optional)) { new_post_notify_action(rsc, current_node, n_data); } } } // Create notify actions for start or promote if ((rsc->next_role != RSC_ROLE_STOPPED) && ((task == start_rsc) || (task == action_promote))) { start = find_first_action(rsc->actions, NULL, RSC_START, NULL); if (start != NULL) { pe_action_t *remote_start = find_remote_start(start); if ((remote_start != NULL) && !pcmk_is_set(remote_start->flags, pe_action_runnable)) { /* Start and promote actions for a clone instance behind * a Pacemaker Remote connection happen after the * connection starts. If the connection start is blocked, do * not schedule notifications for these actions. */ return; } } if (rsc->allocated_to == NULL) { pe_proc_err("Next role '%s' but %s is not allocated", role2text(rsc->next_role), rsc->id); return; } if ((task != start_rsc) || (start == NULL) || pcmk_is_set(start->flags, pe_action_optional)) { new_notify_action(rsc, rsc->allocated_to, n_data->pre, n_data->pre_done, n_data); } new_post_notify_action(rsc, rsc->allocated_to, n_data); } } /*! * \internal * \brief Create notification data and actions for a clone * * \param[in] rsc Clone resource that notification is for * \param[in] n_data Clone notification data for some action */ void -pcmk__create_notifications(pe_resource_t *rsc, notify_data_t *n_data) +pe__create_notifications(pe_resource_t *rsc, notify_data_t *n_data) { if ((rsc == NULL) || (n_data == NULL)) { return; } collect_resource_data(rsc, true, n_data); add_notif_keys(rsc, n_data); create_notify_actions(rsc, n_data); } /*! * \internal * \brief Free notification data * * \param[in] n_data Notification data to free */ void -pcmk__free_notification_data(notify_data_t *n_data) +pe__free_notification_data(notify_data_t *n_data) { if (n_data == NULL) { return; } g_list_free_full(n_data->stop, free); g_list_free_full(n_data->start, free); g_list_free_full(n_data->demote, free); g_list_free_full(n_data->promote, free); g_list_free_full(n_data->promoted, free); g_list_free_full(n_data->unpromoted, free); g_list_free_full(n_data->active, free); g_list_free_full(n_data->inactive, free); pcmk_free_nvpairs(n_data->keys); free(n_data); } /*! * \internal * \brief Order clone "notifications complete" pseudo-action after fencing * * If a stop action is implied by fencing, the usual notification pseudo-actions * will not be sufficient to order things properly, or even create all needed * notifications if the clone is also stopping on another node, and another * clone is ordered after it. This function creates new notification * pseudo-actions relative to the fencing to ensure everything works properly. 
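 *
 * \par Usage sketch
 * An illustrative call (names are placeholders): "stop" is the fencing-implied
 * stop of clone instance "instance", and "stonith_op" is the fencing action:
 * \code
 *     pe__order_notifs_after_fencing(stop, instance, stonith_op);
 * \endcode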
* * \param[in] stop Stop action implied by fencing * \param[in] rsc Clone resource that notification is for * \param[in] stonith_op Fencing action that implies \p stop */ void -pcmk__order_notifs_after_fencing(pe_action_t *stop, pe_resource_t *rsc, - pe_action_t *stonith_op) +pe__order_notifs_after_fencing(pe_action_t *stop, pe_resource_t *rsc, + pe_action_t *stonith_op) { notify_data_t *n_data; crm_info("Ordering notifications for implied %s after fencing", stop->uuid); - n_data = pcmk__clone_notif_pseudo_ops(rsc, RSC_STOP, NULL, stonith_op); + n_data = pe__clone_notif_pseudo_ops(rsc, RSC_STOP, NULL, stonith_op); collect_resource_data(rsc, false, n_data); add_notify_env(n_data, "notify_stop_resource", rsc->id); add_notify_env(n_data, "notify_stop_uname", stop->node->details->uname); create_notify_actions(uber_parent(rsc), n_data); - pcmk__free_notification_data(n_data); + pe__free_notification_data(n_data); } diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index 26c8062faf..90a7e0a42d 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1,2617 +1,867 @@ /* * Copyright 2004-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include -#include -#include -#include -#include -#include #include #include +#include +#include #include #include -#include "pe_status_private.h" extern bool pcmk__is_daemon; void print_str_str(gpointer key, gpointer value, gpointer user_data); gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); -static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, - pe_working_set_t * data_set, guint interval_ms); -static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, - gboolean include_disabled); - -#if ENABLE_VERSIONED_ATTRS -pe_rsc_action_details_t * -pe_rsc_action_details(pe_action_t *action) -{ - pe_rsc_action_details_t *details; - - CRM_CHECK(action != NULL, return NULL); - - if (action->action_details == NULL) { - action->action_details = calloc(1, sizeof(pe_rsc_action_details_t)); - CRM_CHECK(action->action_details != NULL, return NULL); - } - - details = (pe_rsc_action_details_t *) action->action_details; - if (details->versioned_parameters == NULL) { - details->versioned_parameters = create_xml_node(NULL, - XML_TAG_OP_VER_ATTRS); - } - if (details->versioned_meta == NULL) { - details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META); - } - return details; -} - -static void -pe_free_rsc_action_details(pe_action_t *action) -{ - pe_rsc_action_details_t *details; - - if ((action == NULL) || (action->action_details == NULL)) { - return; - } - - details = (pe_rsc_action_details_t *) action->action_details; - - if (details->versioned_parameters) { - free_xml(details->versioned_parameters); - } - if (details->versioned_meta) { - free_xml(details->versioned_meta); - } - - action->action_details = NULL; -} -#endif /*! * \internal * \brief Check whether we can fence a particular node * * \param[in] data_set Working set for cluster * \param[in] node Name of node to check * * \return true if node can be fenced, false otherwise */ bool pe_can_fence(pe_working_set_t *data_set, pe_node_t *node) { if (pe__is_guest_node(node)) { /* Guest nodes are fenced by stopping their container resource. We can * do that if the container's host is either online or fenceable. 
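 *
 * As an illustrative example (not from the source): a guest node whose
 * container is running on an online cluster node is considered fenceable
 * here even if no fence devices are configured, because "fencing" the guest
 * only requires stopping its container.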
*/ pe_resource_t *rsc = node->details->remote_rsc->container; for (GList *n = rsc->running_on; n != NULL; n = n->next) { pe_node_t *container_node = n->data; if (!container_node->details->online && !pe_can_fence(data_set, container_node)) { return false; } } return true; } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { return false; /* Turned off */ } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) { return false; /* No devices */ } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) { return true; } else if (data_set->no_quorum_policy == no_quorum_ignore) { return true; } else if(node == NULL) { return false; } else if(node->details->online) { crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname); return true; } crm_trace("Cannot fence %s", node->details->uname); return false; } /*! * \internal * \brief Copy a node object * * \param[in] this_node Node object to copy * * \return Newly allocated shallow copy of this_node * \note This function asserts on errors and is guaranteed to return non-NULL. */ pe_node_t * pe__copy_node(const pe_node_t *this_node) { pe_node_t *new_node = NULL; CRM_ASSERT(this_node != NULL); new_node = calloc(1, sizeof(pe_node_t)); CRM_ASSERT(new_node != NULL); new_node->rsc_discover_mode = this_node->rsc_discover_mode; new_node->weight = this_node->weight; new_node->fixed = this_node->fixed; new_node->details = this_node->details; return new_node; } /* any node in list1 or list2 and not in the other gets a score of -INFINITY */ void node_list_exclude(GHashTable * hash, GList *list, gboolean merge_scores) { GHashTable *result = hash; pe_node_t *other_node = NULL; GList *gIter = list; GHashTableIter iter; pe_node_t *node = NULL; g_hash_table_iter_init(&iter, hash); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { other_node = pe_find_node_id(list, node->details->id); if (other_node == NULL) { node->weight = -INFINITY; } else if (merge_scores) { node->weight = pcmk__add_scores(node->weight, other_node->weight); } } for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; other_node = pe_hash_table_lookup(result, node->details->id); if (other_node == NULL) { pe_node_t *new_node = pe__copy_node(node); new_node->weight = -INFINITY; g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } } } /*! * \internal * \brief Create a node hash table from a node list * * \param[in] list Node list * * \return Hash table equivalent of node list */ GHashTable * pe__node_list2table(GList *list) { GHashTable *result = NULL; result = pcmk__strkey_table(NULL, free); for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { pe_node_t *new_node = pe__copy_node((pe_node_t *) gIter->data); g_hash_table_insert(result, (gpointer) new_node->details->id, new_node); } return result; } gint sort_node_uname(gconstpointer a, gconstpointer b) { return pcmk__numeric_strcasecmp(((const pe_node_t *) a)->details->uname, ((const pe_node_t *) b)->details->uname); } /*! 
* \internal * \brief Output node weights to stdout * * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ static void pe__output_node_weights(pe_resource_t *rsc, const char *comment, GHashTable *nodes, pe_working_set_t *data_set) { pcmk__output_t *out = data_set->priv; // Sort the nodes so the output is consistent for regression tests GList *list = g_list_sort(g_hash_table_get_values(nodes), sort_node_uname); for (GList *gIter = list; gIter != NULL; gIter = gIter->next) { pe_node_t *node = (pe_node_t *) gIter->data; out->message(out, "node-weight", rsc, comment, node->details->uname, pcmk_readable_score(node->weight)); } g_list_free(list); } /*! * \internal * \brief Log node weights at trace level * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with * \param[in] nodes If rsc is not specified, use these nodes */ static void pe__log_node_weights(const char *file, const char *function, int line, pe_resource_t *rsc, const char *comment, GHashTable *nodes) { GHashTableIter iter; pe_node_t *node = NULL; // Don't waste time if we're not tracing at this point pcmk__log_else(LOG_TRACE, return); g_hash_table_iter_init(&iter, nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { if (rsc) { qb_log_from_external_source(function, file, "%s: %s allocation score on %s: %s", LOG_TRACE, line, 0, comment, rsc->id, node->details->uname, pcmk_readable_score(node->weight)); } else { qb_log_from_external_source(function, file, "%s: %s = %s", LOG_TRACE, line, 0, comment, node->details->uname, pcmk_readable_score(node->weight)); } } } /*! 
* \internal * \brief Log or output node weights * * \param[in] file Caller's filename * \param[in] function Caller's function name * \param[in] line Caller's line number * \param[in] to_log Log if true, otherwise output * \param[in] rsc Use allowed nodes for this resource * \param[in] comment Text description to prefix lines with - * \param[in] nodes Use these nodes - */ -void -pe__show_node_weights_as(const char *file, const char *function, int line, - bool to_log, pe_resource_t *rsc, const char *comment, - GHashTable *nodes, pe_working_set_t *data_set) -{ - if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) { - // Don't show allocation scores for orphans - return; - } - if (nodes == NULL) { - // Nothing to show - return; - } - - if (to_log) { - pe__log_node_weights(file, function, line, rsc, comment, nodes); - } else { - pe__output_node_weights(rsc, comment, nodes, data_set); - } - - // If this resource has children, repeat recursively for each - if (rsc && rsc->children) { - for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { - pe_resource_t *child = (pe_resource_t *) gIter->data; - - pe__show_node_weights_as(file, function, line, to_log, child, - comment, child->allowed_nodes, data_set); - } - } -} - -gint -sort_rsc_priority(gconstpointer a, gconstpointer b) -{ - const pe_resource_t *resource1 = (const pe_resource_t *)a; - const pe_resource_t *resource2 = (const pe_resource_t *)b; - - if (a == NULL && b == NULL) { - return 0; - } - if (a == NULL) { - return 1; - } - if (b == NULL) { - return -1; - } - - if (resource1->priority > resource2->priority) { - return -1; - } - - if (resource1->priority < resource2->priority) { - return 1; - } - - return 0; -} - -static enum pe_quorum_policy -effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) -{ - enum pe_quorum_policy policy = data_set->no_quorum_policy; - - if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) { - policy = no_quorum_ignore; - - } else if (data_set->no_quorum_policy == no_quorum_demote) { - switch (rsc->role) { - case RSC_ROLE_PROMOTED: - case RSC_ROLE_UNPROMOTED: - if (rsc->next_role > RSC_ROLE_UNPROMOTED) { - pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED, - "no-quorum-policy=demote"); - } - policy = no_quorum_ignore; - break; - default: - policy = no_quorum_stop; - break; - } - } - return policy; -} - -static void -add_singleton(pe_working_set_t *data_set, pe_action_t *action) -{ - if (data_set->singletons == NULL) { - data_set->singletons = pcmk__strkey_table(NULL, NULL); - } - g_hash_table_insert(data_set->singletons, action->uuid, action); -} - -static pe_action_t * -lookup_singleton(pe_working_set_t *data_set, const char *action_uuid) -{ - if (data_set->singletons == NULL) { - return NULL; - } - return g_hash_table_lookup(data_set->singletons, action_uuid); -} - -/*! - * \internal - * \brief Find an existing action that matches arguments - * - * \param[in] key Action key to match - * \param[in] rsc Resource to match (if any) - * \param[in] node Node to match (if any) - * \param[in] data_set Cluster working set - * - * \return Existing action that matches arguments (or NULL if none) - */ -static pe_action_t * -find_existing_action(const char *key, pe_resource_t *rsc, pe_node_t *node, - pe_working_set_t *data_set) -{ - GList *matches = NULL; - pe_action_t *action = NULL; - - /* When rsc is NULL, it would be quicker to check data_set->singletons, - * but checking all data_set->actions takes the node into account. - */ - matches = find_actions(((rsc == NULL)? 
data_set->actions : rsc->actions), - key, node); - if (matches == NULL) { - return NULL; - } - CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches)); - - action = matches->data; - g_list_free(matches); - return action; -} - -/*! - * \internal - * \brief Create a new action object - * - * \param[in] key Action key - * \param[in] task Action name - * \param[in] rsc Resource that action is for (if any) - * \param[in] node Node that action is on (if any) - * \param[in] optional Whether action should be considered optional - * \param[in] for_graph Whether action should be recorded in transition graph - * \param[in] data_set Cluster working set - * - * \return Newly allocated action - * \note This function takes ownership of \p key. It is the caller's - * responsibility to free the return value with pe_free_action(). - */ -static pe_action_t * -new_action(char *key, const char *task, pe_resource_t *rsc, pe_node_t *node, - bool optional, bool for_graph, pe_working_set_t *data_set) -{ - pe_action_t *action = calloc(1, sizeof(pe_action_t)); - - CRM_ASSERT(action != NULL); - - action->rsc = rsc; - action->task = strdup(task); CRM_ASSERT(action->task != NULL); - action->uuid = key; - action->extra = pcmk__strkey_table(free, free); - action->meta = pcmk__strkey_table(free, free); - - if (node) { - action->node = pe__copy_node(node); - } - - if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { - // Resource history deletion for a node can be done on the DC - pe__set_action_flags(action, pe_action_dc); - } - - pe__set_action_flags(action, pe_action_runnable); - if (optional) { - pe__set_action_flags(action, pe_action_optional); - } else { - pe__clear_action_flags(action, pe_action_optional); - } - - if (rsc != NULL) { - guint interval_ms = 0; - - action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); - parse_op_key(key, NULL, NULL, &interval_ms); - unpack_operation(action, action->op_entry, rsc->container, data_set, - interval_ms); - } - - if (for_graph) { - pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s", - (optional? "optional" : "required"), - data_set->action_id, key, task, - ((rsc == NULL)? "no resource" : rsc->id), - ((node == NULL)? "no node" : node->details->uname)); - action->id = data_set->action_id++; - - data_set->actions = g_list_prepend(data_set->actions, action); - if (rsc == NULL) { - add_singleton(data_set, action); - } else { - rsc->actions = g_list_prepend(rsc->actions, action); - } - } - return action; -} - -/*! - * \internal - * \brief Evaluate node attribute values for an action - * - * \param[in] action Action to unpack attributes for - * \param[in] data_set Cluster working set - */ -static void -unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set) -{ - if (!pcmk_is_set(action->flags, pe_action_have_node_attrs) - && (action->op_entry != NULL)) { - - pe_rule_eval_data_t rule_data = { - .node_hash = action->node->details->attrs, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - pe__set_action_flags(action, pe_action_have_node_attrs); - pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, - &rule_data, action->extra, NULL, - FALSE, data_set); - } -} - -/*! 
- * \internal - * \brief Update an action's optional flag - * - * \param[in] action Action to update - * \param[in] optional Requested optional status - */ -static void -update_action_optional(pe_action_t *action, gboolean optional) -{ - // Force a non-recurring action to be optional if its resource is unmanaged - if ((action->rsc != NULL) && (action->node != NULL) - && !pcmk_is_set(action->flags, pe_action_pseudo) - && !pcmk_is_set(action->rsc->flags, pe_rsc_managed) - && (g_hash_table_lookup(action->meta, - XML_LRM_ATTR_INTERVAL_MS) == NULL)) { - pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)", - action->uuid, action->node->details->uname, - action->rsc->id); - pe__set_action_flags(action, pe_action_optional); - // We shouldn't clear runnable here because ... something - - // Otherwise require the action if requested - } else if (!optional) { - pe__clear_action_flags(action, pe_action_optional); - } -} - -/*! - * \internal - * \brief Update a resource action's runnable flag - * - * \param[in] action Action to update - * \param[in] for_graph Whether action should be recorded in transition graph - * \param[in] data_set Cluster working set - * - * \note This may also schedule fencing if a stop is unrunnable. - */ -static void -update_resource_action_runnable(pe_action_t *action, bool for_graph, - pe_working_set_t *data_set) -{ - if (pcmk_is_set(action->flags, pe_action_pseudo)) { - return; - } - - if (action->node == NULL) { - pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)", - action->uuid); - pe__clear_action_flags(action, pe_action_runnable); - - } else if (!pcmk_is_set(action->flags, pe_action_dc) - && !(action->node->details->online) - && (!pe__is_guest_node(action->node) - || action->node->details->remote_requires_reset)) { - pe__clear_action_flags(action, pe_action_runnable); - do_crm_log((for_graph? LOG_WARNING: LOG_TRACE), - "%s on %s is unrunnable (node is offline)", - action->uuid, action->node->details->uname); - if (pcmk_is_set(action->rsc->flags, pe_rsc_managed) - && for_graph - && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei) - && !(action->node->details->unclean)) { - pe_fence_node(data_set, action->node, "stop is unrunnable", false); - } - - } else if (!pcmk_is_set(action->flags, pe_action_dc) - && action->node->details->pending) { - pe__clear_action_flags(action, pe_action_runnable); - do_crm_log((for_graph? LOG_WARNING: LOG_TRACE), - "Action %s on %s is unrunnable (node is pending)", - action->uuid, action->node->details->uname); - - } else if (action->needs == rsc_req_nothing) { - pe_action_set_reason(action, NULL, TRUE); - if (pe__is_guest_node(action->node) - && !pe_can_fence(data_set, action->node)) { - /* An action that requires nothing usually does not require any - * fencing in order to be runnable. However, there is an exception: - * such an action cannot be completed if it is on a guest node whose - * host is unclean and cannot be fenced. 
- */ - pe_rsc_debug(action->rsc, "%s on %s is unrunnable " - "(node's host cannot be fenced)", - action->uuid, action->node->details->uname); - pe__clear_action_flags(action, pe_action_runnable); - } else { - pe_rsc_trace(action->rsc, - "%s on %s does not require fencing or quorum", - action->uuid, action->node->details->uname); - pe__set_action_flags(action, pe_action_runnable); - } - - } else { - switch (effective_quorum_policy(action->rsc, data_set)) { - case no_quorum_stop: - pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)", - action->uuid, action->node->details->uname); - pe__clear_action_flags(action, pe_action_runnable); - pe_action_set_reason(action, "no quorum", true); - break; - - case no_quorum_freeze: - if (!action->rsc->fns->active(action->rsc, TRUE) - || (action->rsc->next_role > action->rsc->role)) { - pe_rsc_debug(action->rsc, - "%s on %s is unrunnable (no quorum)", - action->uuid, action->node->details->uname); - pe__clear_action_flags(action, pe_action_runnable); - pe_action_set_reason(action, "quorum freeze", true); - } - break; - - default: - //pe_action_set_reason(action, NULL, TRUE); - pe__set_action_flags(action, pe_action_runnable); - break; - } - } -} - -/*! - * \internal - * \brief Update a resource object's flags for a new action on it - * - * \param[in] rsc Resource that action is for (if any) - * \param[in] action New action - */ -static void -update_resource_flags_for_action(pe_resource_t *rsc, pe_action_t *action) -{ - /* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used - * within Pacemaker, and should be deprecated and eventually removed - */ - if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) { - pe__set_resource_flags(rsc, pe_rsc_stopping); - - } else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) { - if (pcmk_is_set(action->flags, pe_action_runnable)) { - pe__set_resource_flags(rsc, pe_rsc_starting); - } else { - pe__clear_resource_flags(rsc, pe_rsc_starting); - } - } -} - -/*! - * \brief Create or update an action object - * - * \param[in] rsc Resource that action is for (if any) - * \param[in] key Action key (must be non-NULL) - * \param[in] task Action name (must be non-NULL) - * \param[in] on_node Node that action is on (if any) - * \param[in] optional Whether action should be considered optional - * \param[in] save_action Whether action should be recorded in transition graph - * \param[in] data_set Cluster working set - * - * \return Action object corresponding to arguments - * \note This function takes ownership of (and might free) \p key. If - * \p save_action is true, \p data_set will own the returned action, - * otherwise it is the caller's responsibility to free the return value - * with pe_free_action(). 
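 *
 * \par Example (editorial sketch, not part of this patch)
 * \code
 *     // Schedule a required stop of rsc on node, recorded in the graph.
 *     // custom_action() takes ownership of the key from pcmk__op_key().
 *     pe_action_t *stop = custom_action(rsc,
 *                                       pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0),
 *                                       CRMD_ACTION_STOP, node,
 *                                       FALSE, TRUE, data_set);
 * \endcode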
- */ -pe_action_t * -custom_action(pe_resource_t *rsc, char *key, const char *task, - pe_node_t *on_node, gboolean optional, gboolean save_action, - pe_working_set_t *data_set) -{ - pe_action_t *action = NULL; - - CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL)); - - if (save_action) { - action = find_existing_action(key, rsc, on_node, data_set); - } - - if (action == NULL) { - action = new_action(key, task, rsc, on_node, optional, save_action, - data_set); - } else { - free(key); - } - - update_action_optional(action, optional); - - if (rsc != NULL) { - if (action->node != NULL) { - unpack_action_node_attributes(action, data_set); - } - - update_resource_action_runnable(action, save_action, data_set); - - if (save_action) { - update_resource_flags_for_action(rsc, action); - } - } - - return action; -} - -static bool -valid_stop_on_fail(const char *value) -{ - return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL); -} - -static const char * -unpack_operation_on_fail(pe_action_t * action) -{ - - const char *name = NULL; - const char *role = NULL; - const char *on_fail = NULL; - const char *interval_spec = NULL; - const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); - - if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei) - && !valid_stop_on_fail(value)) { - - pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop " - "action to default value because '%s' is not " - "allowed for stop", action->rsc->id, value); - return NULL; - - } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) { - // demote on_fail defaults to monitor value for promoted role if present - xmlNode *operation = NULL; - - CRM_CHECK(action->rsc != NULL, return NULL); - - for (operation = pcmk__xe_first_child(action->rsc->ops_xml); - (operation != NULL) && (value == NULL); - operation = pcmk__xe_next(operation)) { - bool enabled = false; - - if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { - continue; - } - name = crm_element_value(operation, "name"); - role = crm_element_value(operation, "role"); - on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL); - interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); - if (!on_fail) { - continue; - } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) { - continue; - } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei) - || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S, - RSC_ROLE_PROMOTED_LEGACY_S, - NULL)) { - continue; - } else if (crm_parse_interval_spec(interval_spec) == 0) { - continue; - } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) { - continue; - } - - value = on_fail; - } - } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { - value = "ignore"; - - } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { - name = crm_element_value(action->op_entry, "name"); - role = crm_element_value(action->op_entry, "role"); - interval_spec = crm_element_value(action->op_entry, - XML_LRM_ATTR_INTERVAL); - - if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei) - && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei) - || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S, - RSC_ROLE_PROMOTED_LEGACY_S, NULL) - || (crm_parse_interval_spec(interval_spec) == 0))) { - pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s " - "action to default value because 'demote' is not " - "allowed for it", action->rsc->id, name); - return 
NULL; - } - } - - return value; -} - -static xmlNode * -find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled) -{ - guint interval_ms = 0; - guint min_interval_ms = G_MAXUINT; - const char *name = NULL; - const char *interval_spec = NULL; - xmlNode *op = NULL; - xmlNode *operation = NULL; - - for (operation = pcmk__xe_first_child(rsc->ops_xml); - operation != NULL; - operation = pcmk__xe_next(operation)) { - - if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { - bool enabled = false; - - name = crm_element_value(operation, "name"); - interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); - if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && - !enabled) { - continue; - } - - if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) { - continue; - } - - interval_ms = crm_parse_interval_spec(interval_spec); - - if (interval_ms && (interval_ms < min_interval_ms)) { - min_interval_ms = interval_ms; - op = operation; - } - } - } - - return op; -} - -static int -unpack_start_delay(const char *value, GHashTable *meta) -{ - int start_delay = 0; - - if (value != NULL) { - start_delay = crm_get_msec(value); - - if (start_delay < 0) { - start_delay = 0; - } - - if (meta) { - g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), - pcmk__itoa(start_delay)); - } - } - - return start_delay; -} - -// true if value contains valid, non-NULL interval origin for recurring op -static bool -unpack_interval_origin(const char *value, xmlNode *xml_obj, guint interval_ms, - crm_time_t *now, long long *start_delay) -{ - long long result = 0; - guint interval_sec = interval_ms / 1000; - crm_time_t *origin = NULL; - - // Ignore unspecified values and non-recurring operations - if ((value == NULL) || (interval_ms == 0) || (now == NULL)) { - return false; - } - - // Parse interval origin from text - origin = crm_time_new(value); - if (origin == NULL) { - pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation " - "'%s' because '%s' is not valid", - (ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value); - return false; - } - - // Get seconds since origin (negative if origin is in the future) - result = crm_time_get_seconds(now) - crm_time_get_seconds(origin); - crm_time_free(origin); - - // Calculate seconds from closest interval to now - result = result % interval_sec; - - // Calculate seconds remaining until next interval - result = ((result <= 0)? 0 : interval_sec) - result; - crm_info("Calculated a start delay of %llds for operation '%s'", - result, - (ID(xml_obj)? 
ID(xml_obj) : "(unspecified)")); - - if (start_delay != NULL) { - *start_delay = result * 1000; // milliseconds - } - return true; -} - -static int -unpack_timeout(const char *value) -{ - int timeout_ms = crm_get_msec(value); - - if (timeout_ms < 0) { - timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); - } - return timeout_ms; -} - -int -pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set) -{ - xmlNode *child = NULL; - GHashTable *action_meta = NULL; - const char *timeout_spec = NULL; - int timeout_ms = 0; - - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); - child != NULL; child = crm_next_same_xml(child)) { - if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME), - pcmk__str_casei)) { - timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT); - break; - } - } - - if (timeout_spec == NULL && data_set->op_defaults) { - action_meta = pcmk__strkey_table(free, free); - pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, - &rule_data, action_meta, NULL, FALSE, data_set); - timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); - } - - // @TODO check meta-attributes (including versioned meta-attributes) - // @TODO maybe use min-interval monitor timeout as default for monitors - - timeout_ms = crm_get_msec(timeout_spec); - if (timeout_ms < 0) { - timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S); - } - - if (action_meta != NULL) { - g_hash_table_destroy(action_meta); - } - return timeout_ms; -} - -#if ENABLE_VERSIONED_ATTRS -static void -unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, - guint interval_ms, crm_time_t *now) -{ - xmlNode *attrs = NULL; - xmlNode *attr = NULL; - - for (attrs = pcmk__xe_first_child(versioned_meta); attrs != NULL; - attrs = pcmk__xe_next(attrs)) { - - for (attr = pcmk__xe_first_child(attrs); attr != NULL; - attr = pcmk__xe_next(attr)) { - - const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME); - const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE); - - if (pcmk__str_eq(name, XML_OP_ATTR_START_DELAY, pcmk__str_casei)) { - int start_delay = unpack_start_delay(value, NULL); - - crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay); - } else if (pcmk__str_eq(name, XML_OP_ATTR_ORIGIN, pcmk__str_casei)) { - long long start_delay = 0; - - if (unpack_interval_origin(value, xml_obj, interval_ms, now, - &start_delay)) { - crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, - XML_OP_ATTR_START_DELAY); - crm_xml_add_ll(attr, XML_NVPAIR_ATTR_VALUE, start_delay); - } - } else if (pcmk__str_eq(name, XML_ATTR_TIMEOUT, pcmk__str_casei)) { - int timeout_ms = unpack_timeout(value); - - crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout_ms); - } - } - } -} -#endif - -/*! - * \brief Unpack operation XML into an action structure - * - * Unpack an operation's meta-attributes (normalizing the interval, timeout, - * and start delay values as integer milliseconds), requirements, and - * failure policy. 
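 *
 * Editorial illustration (assuming the usual Pacemaker time-spec parsing):
 * an operation configured with interval="10s" and timeout="1m" ends up in
 * action->meta with XML_LRM_ATTR_INTERVAL="10000" and
 * XML_ATTR_TIMEOUT="60000", both normalized to milliseconds.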
- * - * \param[in,out] action Action to unpack into - * \param[in] xml_obj Operation XML (or NULL if all defaults) - * \param[in] container Resource that contains affected resource, if any - * \param[in] data_set Cluster state - * \param[in] interval_ms How frequently to perform the operation - */ -static void -unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, - pe_working_set_t * data_set, guint interval_ms) -{ - int timeout_ms = 0; - const char *value = NULL; - bool is_probe = false; -#if ENABLE_VERSIONED_ATTRS - pe_rsc_action_details_t *rsc_details = NULL; -#endif - - pe_rsc_eval_data_t rsc_rule_data = { - .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS), - .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER), - .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE) - }; - - pe_op_eval_data_t op_rule_data = { - .op_name = action->task, - .interval = interval_ms - }; - - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = &rsc_rule_data, - .op_data = &op_rule_data - }; - - CRM_CHECK(action && action->rsc, return); - - is_probe = pcmk_is_probe(action->task, interval_ms); - - // Cluster-wide - pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data, - action->meta, NULL, FALSE, data_set); - - // Determine probe default timeout differently - if (is_probe) { - xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE); - - if (min_interval_mon) { - value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT); - if (value) { - crm_trace("\t%s: Setting default timeout to minimum-interval " - "monitor's timeout '%s'", action->uuid, value); - g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), - strdup(value)); - } - } - } - - if (xml_obj) { - xmlAttrPtr xIter = NULL; - - // take precedence over defaults - pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data, - action->meta, NULL, TRUE, data_set); - -#if ENABLE_VERSIONED_ATTRS - rsc_details = pe_rsc_action_details(action); - - /* Non-versioned attributes also unpack XML_TAG_ATTR_SETS, but that - * capability is deprecated, so we don't need to extend that support to - * versioned attributes. - */ - pe_eval_versioned_attributes(data_set->input, xml_obj, - XML_TAG_META_SETS, &rule_data, - rsc_details->versioned_meta, - NULL); -#endif - - /* Anything set as an XML property has highest precedence. - * This ensures we use the name and interval from the tag. - */ - for (xIter = xml_obj->properties; xIter; xIter = xIter->next) { - const char *prop_name = (const char *)xIter->name; - const char *prop_value = crm_element_value(xml_obj, prop_name); - - g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value)); - } - } - - g_hash_table_remove(action->meta, "id"); - - // Normalize interval to milliseconds - if (interval_ms > 0) { - g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL), - crm_strdup_printf("%u", interval_ms)); - } else { - g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL); - } - - /* - * Timeout order of precedence: - * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params - * and task is start or a probe; pcmk_monitor_timeout works - * by default for a recurring monitor) - * 2. explicit op timeout on the primitive - * 3. default op timeout - * a. if probe, then min-interval monitor's timeout - * b. else, in XML_CIB_TAG_OPCONFIG - * 4. 
CRM_DEFAULT_OP_TIMEOUT_S - * - * #1 overrides general rule of XML property having highest - * precedence. - */ - if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard), - pcmk_ra_cap_fence_params) - && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei) - || is_probe)) { - - GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set); - - value = g_hash_table_lookup(params, "pcmk_monitor_timeout"); - - if (value) { - crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', " - "overriding default", action->uuid, value); - g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), - strdup(value)); - } - } - - // Normalize timeout to positive milliseconds - value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT); - timeout_ms = unpack_timeout(value); - g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), - pcmk__itoa(timeout_ms)); - - if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) { - action->needs = rsc_req_nothing; - value = "nothing (not start or promote)"; - - } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) { - action->needs = rsc_req_stonith; - value = "fencing"; - - } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) { - action->needs = rsc_req_quorum; - value = "quorum"; - - } else { - action->needs = rsc_req_nothing; - value = "nothing"; - } - pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value); - - value = unpack_operation_on_fail(action); - - if (value == NULL) { - - } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) { - action->on_fail = action_fail_block; - g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block")); - value = "block"; // The above could destroy the original string - - } else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) { - action->on_fail = action_fail_fence; - value = "node fencing"; - - if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { - pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for " - "operation '%s' to 'stop' because 'fence' is not " - "valid when fencing is disabled", action->uuid); - action->on_fail = action_fail_stop; - action->fail_role = RSC_ROLE_STOPPED; - value = "stop resource"; - } - - } else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) { - action->on_fail = action_fail_standby; - value = "node standby"; - - } else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING, - NULL)) { - action->on_fail = action_fail_ignore; - value = "ignore"; - - } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) { - action->on_fail = action_fail_migrate; - value = "force migration"; - - } else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) { - action->on_fail = action_fail_stop; - action->fail_role = RSC_ROLE_STOPPED; - value = "stop resource"; - - } else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) { - action->on_fail = action_fail_recover; - value = "restart (and possibly migrate)"; - - } else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) { - if (container) { - action->on_fail = action_fail_restart_container; - value = "restart container (and possibly migrate)"; - - } else { - value = NULL; - } - - } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) { - action->on_fail = action_fail_demote; - value = "demote instance"; - - } else { - pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); - value = NULL; - } - - /* defaults */ - if (value == NULL && container) { - action->on_fail = action_fail_restart_container; - value = 
"restart container (and possibly migrate) (default)"; - - /* For remote nodes, ensure that any failure that results in dropping an - * active connection to the node results in fencing of the node. - * - * There are only two action failures that don't result in fencing. - * 1. probes - probe failures are expected. - * 2. start - a start failure indicates that an active connection does not already - * exist. The user can set op on-fail=fence if they really want to fence start - * failures. */ - } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed)) - && pe__resource_is_remote_conn(action->rsc, data_set) - && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei) - && (interval_ms == 0)) - && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) { - - if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) { - action->on_fail = action_fail_stop; - action->fail_role = RSC_ROLE_STOPPED; - value = "stop unmanaged remote node (enforcing default)"; - - } else { - if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { - value = "fence remote node (default)"; - } else { - value = "recover remote node connection (default)"; - } - - if (action->rsc->remote_reconnect_ms) { - action->fail_role = RSC_ROLE_STOPPED; - } - action->on_fail = action_fail_reset_remote; - } - - } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) { - if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) { - action->on_fail = action_fail_fence; - value = "resource fence (default)"; - - } else { - action->on_fail = action_fail_block; - value = "resource block (default)"; - } - - } else if (value == NULL) { - action->on_fail = action_fail_recover; - value = "restart (and possibly migrate) (default)"; - } - - pe_rsc_trace(action->rsc, "%s failure handling: %s", - action->uuid, value); - - value = NULL; - if (xml_obj != NULL) { - value = g_hash_table_lookup(action->meta, "role_after_failure"); - if (value) { - pe_warn_once(pe_wo_role_after, - "Support for role_after_failure is deprecated and will be removed in a future release"); - } - } - if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) { - action->fail_role = text2role(value); - } - /* defaults */ - if (action->fail_role == RSC_ROLE_UNKNOWN) { - if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) { - action->fail_role = RSC_ROLE_UNPROMOTED; - } else { - action->fail_role = RSC_ROLE_STARTED; - } - } - pe_rsc_trace(action->rsc, "%s failure results in: %s", - action->uuid, role2text(action->fail_role)); - - value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY); - if (value) { - unpack_start_delay(value, action->meta); - } else { - long long start_delay = 0; - - value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN); - if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now, - &start_delay)) { - g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY), - crm_strdup_printf("%lld", start_delay)); - } - } - -#if ENABLE_VERSIONED_ATTRS - unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval_ms, - data_set->now); -#endif -} - -static xmlNode * -find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled) -{ - guint interval_ms = 0; - gboolean do_retry = TRUE; - char *local_key = NULL; - const char *name = NULL; - const char *interval_spec = NULL; - char *match_key = NULL; - xmlNode *op = NULL; - xmlNode *operation = NULL; - - retry: - for (operation = 
pcmk__xe_first_child(rsc->ops_xml); operation != NULL; - operation = pcmk__xe_next(operation)) { - - if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) { - bool enabled = false; - - name = crm_element_value(operation, "name"); - interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL); - if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && - !enabled) { - continue; - } - - interval_ms = crm_parse_interval_spec(interval_spec); - match_key = pcmk__op_key(rsc->id, name, interval_ms); - if (pcmk__str_eq(key, match_key, pcmk__str_casei)) { - op = operation; - } - free(match_key); - - if (rsc->clone_name) { - match_key = pcmk__op_key(rsc->clone_name, name, interval_ms); - if (pcmk__str_eq(key, match_key, pcmk__str_casei)) { - op = operation; - } - free(match_key); - } - - if (op != NULL) { - free(local_key); - return op; - } - } - } - - free(local_key); - if (do_retry == FALSE) { - return NULL; - } - - do_retry = FALSE; - if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) { - local_key = pcmk__op_key(rsc->id, "migrate", 0); - key = local_key; - goto retry; - - } else if (strstr(key, "_notify_")) { - local_key = pcmk__op_key(rsc->id, "notify", 0); - key = local_key; - goto retry; - } - - return NULL; -} - -xmlNode * -find_rsc_op_entry(pe_resource_t * rsc, const char *key) -{ - return find_rsc_op_entry_helper(rsc, key, FALSE); -} - -/* - * Used by the HashTable for-loop - */ -void -print_str_str(gpointer key, gpointer value, gpointer user_data) -{ - crm_trace("%s%s %s ==> %s", - user_data == NULL ? "" : (char *)user_data, - user_data == NULL ? "" : ": ", (char *)key, (char *)value); -} - + * \param[in] nodes Use these nodes + */ void -pe_free_action(pe_action_t * action) +pe__show_node_weights_as(const char *file, const char *function, int line, + bool to_log, pe_resource_t *rsc, const char *comment, + GHashTable *nodes, pe_working_set_t *data_set) { - if (action == NULL) { + if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) { + // Don't show allocation scores for orphans return; } - g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */ - g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */ - if (action->extra) { - g_hash_table_destroy(action->extra); - } - if (action->meta) { - g_hash_table_destroy(action->meta); - } -#if ENABLE_VERSIONED_ATTRS - if (action->rsc) { - pe_free_rsc_action_details(action); + if (nodes == NULL) { + // Nothing to show + return; } -#endif - free(action->cancel_task); - free(action->reason); - free(action->task); - free(action->uuid); - free(action->node); - free(action); -} - -GList * -find_recurring_actions(GList *input, pe_node_t * not_on_node) -{ - const char *value = NULL; - GList *result = NULL; - GList *gIter = input; - CRM_CHECK(input != NULL, return NULL); - - for (; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = (pe_action_t *) gIter->data; - - value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL_MS); - if (value == NULL) { - /* skip */ - } else if (pcmk__str_eq(value, "0", pcmk__str_casei)) { - /* skip */ - } else if (pcmk__str_eq(CRMD_ACTION_CANCEL, action->task, pcmk__str_casei)) { - /* skip */ - } else if (not_on_node == NULL) { - crm_trace("(null) Found: %s", action->uuid); - result = g_list_prepend(result, action); - - } else if (action->node == NULL) { - /* skip */ - } else if (action->node->details != not_on_node->details) { - crm_trace("Found: %s", action->uuid); - result = 
g_list_prepend(result, action); - } + if (to_log) { + pe__log_node_weights(file, function, line, rsc, comment, nodes); + } else { + pe__output_node_weights(rsc, comment, nodes, data_set); } - return result; -} + // If this resource has children, repeat recursively for each + if (rsc && rsc->children) { + for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { + pe_resource_t *child = (pe_resource_t *) gIter->data; -enum action_tasks -get_complex_task(pe_resource_t * rsc, const char *name, gboolean allow_non_atomic) -{ - enum action_tasks task = text2task(name); - - if (rsc == NULL) { - return task; - - } else if (allow_non_atomic == FALSE || rsc->variant == pe_native) { - switch (task) { - case stopped_rsc: - case started_rsc: - case action_demoted: - case action_promoted: - crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id); - return task - 1; - default: - break; + pe__show_node_weights_as(file, function, line, to_log, child, + comment, child->allowed_nodes, data_set); } } - return task; } -pe_action_t * -find_first_action(GList *input, const char *uuid, const char *task, pe_node_t * on_node) +gint +sort_rsc_priority(gconstpointer a, gconstpointer b) { - GList *gIter = NULL; - - CRM_CHECK(uuid || task, return NULL); - - for (gIter = input; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = (pe_action_t *) gIter->data; - - if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) { - continue; - - } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) { - continue; - - } else if (on_node == NULL) { - return action; - - } else if (action->node == NULL) { - continue; + const pe_resource_t *resource1 = (const pe_resource_t *)a; + const pe_resource_t *resource2 = (const pe_resource_t *)b; - } else if (on_node->details == action->node->details) { - return action; - } + if (a == NULL && b == NULL) { + return 0; } - - return NULL; -} - -GList * -find_actions(GList *input, const char *key, const pe_node_t *on_node) -{ - GList *gIter = input; - GList *result = NULL; - - CRM_CHECK(key != NULL, return NULL); - - for (; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = (pe_action_t *) gIter->data; - - if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) { - continue; - - } else if (on_node == NULL) { - crm_trace("Action %s matches (ignoring node)", key); - result = g_list_prepend(result, action); - - } else if (action->node == NULL) { - crm_trace("Action %s matches (unallocated, assigning to %s)", - key, on_node->details->uname); - - action->node = pe__copy_node(on_node); - result = g_list_prepend(result, action); - - } else if (on_node->details == action->node->details) { - crm_trace("Action %s on %s matches", key, on_node->details->uname); - result = g_list_prepend(result, action); - } + if (a == NULL) { + return 1; } - - return result; -} - -GList * -find_actions_exact(GList *input, const char *key, const pe_node_t *on_node) -{ - GList *result = NULL; - - CRM_CHECK(key != NULL, return NULL); - - if (on_node == NULL) { - return NULL; + if (b == NULL) { + return -1; } - for (GList *gIter = input; gIter != NULL; gIter = gIter->next) { - pe_action_t *action = (pe_action_t *) gIter->data; - - if ((action->node != NULL) - && pcmk__str_eq(key, action->uuid, pcmk__str_casei) - && pcmk__str_eq(on_node->details->id, action->node->details->id, - pcmk__str_casei)) { + if (resource1->priority > resource2->priority) { + return -1; + } - crm_trace("Action %s on %s matches", key, on_node->details->uname); - 
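            /* Editorial note: unlike find_actions() above, this helper never
             * assigns an unallocated action to on_node; only actions already
             * placed on exactly this node are matched.
             */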
result = g_list_prepend(result, action); - } + if (resource1->priority < resource2->priority) { + return 1; } - return result; + return 0; } -/*! - * \brief Find all actions of given type for a resource - * - * \param[in] rsc Resource to search - * \param[in] node Find only actions scheduled on this node - * \param[in] task Action name to search for - * \param[in] require_node If TRUE, NULL node or action node will not match - * - * \return List of actions found (or NULL if none) - * \note If node is not NULL and require_node is FALSE, matching actions - * without a node will be assigned to node. +/* + * Used by the HashTable for-loop */ -GList * -pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, - const char *task, bool require_node) +void +print_str_str(gpointer key, gpointer value, gpointer user_data) { - GList *result = NULL; - char *key = pcmk__op_key(rsc->id, task, 0); - - if (require_node) { - result = find_actions_exact(rsc->actions, key, node); - } else { - result = find_actions(rsc->actions, key, node); - } - free(key); - return result; + crm_trace("%s%s %s ==> %s", + user_data == NULL ? "" : (char *)user_data, + user_data == NULL ? "" : ": ", (char *)key, (char *)value); } static void resource_node_score(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag) { pe_node_t *match = NULL; if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never)) && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) { /* This string comparision may be fragile, but exclusive resources and * exclusive nodes should not have the symmetric_default constraint * applied to them. */ return; } else if (rsc->children) { GList *gIter = rsc->children; for (; gIter != NULL; gIter = gIter->next) { pe_resource_t *child_rsc = (pe_resource_t *) gIter->data; resource_node_score(child_rsc, node, score, tag); } } pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score); match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id); if (match == NULL) { match = pe__copy_node(node); g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match); } match->weight = pcmk__add_scores(match->weight, score); } void resource_location(pe_resource_t * rsc, pe_node_t * node, int score, const char *tag, pe_working_set_t * data_set) { if (node != NULL) { resource_node_score(rsc, node, score, tag); } else if (data_set != NULL) { GList *gIter = data_set->nodes; for (; gIter != NULL; gIter = gIter->next) { pe_node_t *node_iter = (pe_node_t *) gIter->data; resource_node_score(rsc, node_iter, score, tag); } } else { GHashTableIter iter; pe_node_t *node_iter = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) { resource_node_score(rsc, node_iter, score, tag); } } if (node == NULL && score == -INFINITY) { if (rsc->allocated_to) { crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname); free(rsc->allocated_to); rsc->allocated_to = NULL; } } } -#define sort_return(an_int, why) do { \ - free(a_uuid); \ - free(b_uuid); \ - crm_trace("%s (%d) %c %s (%d) : %s", \ - a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \ - b_xml_id, b_call_id, why); \ - return an_int; \ - } while(0) - -int -pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b, - bool same_node_default) -{ - int a_call_id = -1; - int b_call_id = -1; - - char *a_uuid = NULL; - char *b_uuid = NULL; - - const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID); - const 
char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID); - - const char *a_node = crm_element_value(xml_a, XML_LRM_ATTR_TARGET); - const char *b_node = crm_element_value(xml_b, XML_LRM_ATTR_TARGET); - bool same_node = true; - - /* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via - * 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c). - * - * In case that any of the lrm_rsc_op entries doesn't have on_node - * attribute, we need to explicitly tell whether the two operations are on - * the same node. - */ - if (a_node == NULL || b_node == NULL) { - same_node = same_node_default; - - } else { - same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei); - } - - if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) { - /* We have duplicate lrm_rsc_op entries in the status - * section which is unlikely to be a good thing - * - we can handle it easily enough, but we need to get - * to the bottom of why it's happening. - */ - pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id); - sort_return(0, "duplicate"); - } - - crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id); - crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id); - - if (a_call_id == -1 && b_call_id == -1) { - /* both are pending ops so it doesn't matter since - * stops are never pending - */ - sort_return(0, "pending"); - - } else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) { - sort_return(-1, "call id"); - - } else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) { - sort_return(1, "call id"); - - } else if (a_call_id >= 0 && b_call_id >= 0 - && (!same_node || a_call_id == b_call_id)) { - /* - * The op and last_failed_op are the same - * Order on last-rc-change - */ - time_t last_a = -1; - time_t last_b = -1; - - crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a); - crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b); - - crm_trace("rc-change: %lld vs %lld", - (long long) last_a, (long long) last_b); - if (last_a >= 0 && last_a < last_b) { - sort_return(-1, "rc-change"); - - } else if (last_b >= 0 && last_a > last_b) { - sort_return(1, "rc-change"); - } - sort_return(0, "rc-change"); - - } else { - /* One of the inputs is a pending operation - * Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other - */ - - int a_id = -1; - int b_id = -1; - - const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC); - const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC); - - CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic")); - if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL, - NULL)) { - sort_return(0, "bad magic a"); - } - if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL, - NULL)) { - sort_return(0, "bad magic b"); - } - /* try to determine the relative age of the operation... - * some pending operations (e.g. a start) may have been superseded - * by a subsequent stop - * - * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last - */ - if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) { - /* - * some of the logic in here may be redundant... - * - * if the UUID from the TE doesn't match then one better - * be a pending operation. 
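         * (in this function, a pending operation is one whose call ID is -1)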
- * pending operations don't survive between elections and joins - * because we query the LRM directly - */ - - if (b_call_id == -1) { - sort_return(-1, "transition + call"); - - } else if (a_call_id == -1) { - sort_return(1, "transition + call"); - } - - } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) { - sort_return(-1, "transition"); - - } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) { - sort_return(1, "transition"); - } - } - - /* we should never end up here */ - CRM_CHECK(FALSE, sort_return(0, "default")); -} - -gint -sort_op_by_callid(gconstpointer a, gconstpointer b) -{ - const xmlNode *xml_a = a; - const xmlNode *xml_b = b; - - return pe__is_newer_op(xml_a, xml_b, true); -} - time_t get_effective_time(pe_working_set_t * data_set) { if(data_set) { if (data_set->now == NULL) { crm_trace("Recording a new 'now'"); data_set->now = crm_time_new(NULL); } return crm_time_get_seconds_since_epoch(data_set->now); } crm_trace("Defaulting to 'now'"); return time(NULL); } gboolean get_target_role(pe_resource_t * rsc, enum rsc_role_e * role) { enum rsc_role_e local_role = RSC_ROLE_UNKNOWN; const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); CRM_CHECK(role != NULL, return FALSE); if (pcmk__str_eq(value, "started", pcmk__str_null_matches | pcmk__str_casei) || pcmk__str_eq("default", value, pcmk__str_casei)) { return FALSE; } local_role = text2role(value); if (local_role == RSC_ROLE_UNKNOWN) { pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " "because '%s' is not valid", rsc->id, value); return FALSE; } else if (local_role > RSC_ROLE_STARTED) { if (pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable)) { if (local_role > RSC_ROLE_UNPROMOTED) { /* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */ return FALSE; } } else { pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s " "because '%s' only makes sense for promotable " "clones", rsc->id, value); return FALSE; } } *role = local_role; return TRUE; } gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order) { GList *gIter = NULL; pe_action_wrapper_t *wrapper = NULL; GList *list = NULL; if (order == pe_order_none) { return FALSE; } if (lh_action == NULL || rh_action == NULL) { return FALSE; } crm_trace("Creating action wrappers for ordering: %s then %s", lh_action->uuid, rh_action->uuid); /* Ensure we never create a dependency on ourselves... 
it's happened */ CRM_ASSERT(lh_action != rh_action); /* Filter dups, otherwise update_action_states() has too much work to do */ gIter = lh_action->actions_after; for (; gIter != NULL; gIter = gIter->next) { pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data; if (after->action == rh_action && (after->type & order)) { return FALSE; } } wrapper = calloc(1, sizeof(pe_action_wrapper_t)); wrapper->action = rh_action; wrapper->type = order; list = lh_action->actions_after; list = g_list_prepend(list, wrapper); lh_action->actions_after = list; wrapper = calloc(1, sizeof(pe_action_wrapper_t)); wrapper->action = lh_action; wrapper->type = order; list = rh_action->actions_before; list = g_list_prepend(list, wrapper); rh_action->actions_before = list; return TRUE; } -pe_action_t * -get_pseudo_op(const char *name, pe_working_set_t * data_set) -{ - pe_action_t *op = lookup_singleton(data_set, name); - - if (op == NULL) { - op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set); - pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable); - } - return op; -} - void destroy_ticket(gpointer data) { pe_ticket_t *ticket = data; if (ticket->state) { g_hash_table_destroy(ticket->state); } free(ticket->id); free(ticket); } pe_ticket_t * ticket_new(const char *ticket_id, pe_working_set_t * data_set) { pe_ticket_t *ticket = NULL; if (pcmk__str_empty(ticket_id)) { return NULL; } if (data_set->tickets == NULL) { data_set->tickets = pcmk__strkey_table(free, destroy_ticket); } ticket = g_hash_table_lookup(data_set->tickets, ticket_id); if (ticket == NULL) { ticket = calloc(1, sizeof(pe_ticket_t)); if (ticket == NULL) { crm_err("Cannot allocate ticket '%s'", ticket_id); return NULL; } crm_trace("Creaing ticket entry for %s", ticket_id); ticket->id = strdup(ticket_id); ticket->granted = FALSE; ticket->last_granted = -1; ticket->standby = FALSE; ticket->state = pcmk__strkey_table(free, free); g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket); } return ticket; } const char *rsc_printable_id(pe_resource_t *rsc) { if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) { return ID(rsc->xml); } return rsc->id; } void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags) { pe__clear_resource_flags(rsc, flags); for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags); } } void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag) { for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) { pe_resource_t *r = (pe_resource_t *) lpc->data; pe__clear_resource_flags_recursive(r, flag); } } void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags) { pe__set_resource_flags(rsc, flags); for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) { pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags); } } -static GList * -find_unfencing_devices(GList *candidates, GList *matches) -{ - for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) { - pe_resource_t *candidate = gIter->data; - - if (candidate->children != NULL) { - matches = find_unfencing_devices(candidate->children, matches); - - } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) { - continue; - - } else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) { - matches = g_list_prepend(matches, candidate); - - } else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta, - PCMK_STONITH_PROVIDES), - PCMK__VALUE_UNFENCING, - 
pcmk__str_casei)) { - matches = g_list_prepend(matches, candidate); - } - } - return matches; -} - -static int -node_priority_fencing_delay(pe_node_t * node, pe_working_set_t * data_set) -{ - int member_count = 0; - int online_count = 0; - int top_priority = 0; - int lowest_priority = 0; - GList *gIter = NULL; - - // `priority-fencing-delay` is disabled - if (data_set->priority_fencing_delay <= 0) { - return 0; - } - - /* No need to request a delay if the fencing target is not a normal cluster - * member, for example if it's a remote node or a guest node. */ - if (node->details->type != node_member) { - return 0; - } - - // No need to request a delay if the fencing target is in our partition - if (node->details->online) { - return 0; - } - - for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { - pe_node_t *n = gIter->data; - - if (n->details->type != node_member) { - continue; - } - - member_count ++; - - if (n->details->online) { - online_count++; - } - - if (member_count == 1 - || n->details->priority > top_priority) { - top_priority = n->details->priority; - } - - if (member_count == 1 - || n->details->priority < lowest_priority) { - lowest_priority = n->details->priority; - } - } - - // No need to delay if we have more than half of the cluster members - if (online_count > member_count / 2) { - return 0; - } - - /* All the nodes have equal priority. - * Any configured corresponding `pcmk_delay_base/max` will be applied. */ - if (lowest_priority == top_priority) { - return 0; - } - - if (node->details->priority < top_priority) { - return 0; - } - - return data_set->priority_fencing_delay; -} - -pe_action_t * -pe_fence_op(pe_node_t * node, const char *op, bool optional, const char *reason, - bool priority_delay, pe_working_set_t * data_set) -{ - char *op_key = NULL; - pe_action_t *stonith_op = NULL; - - if(op == NULL) { - op = data_set->stonith_action; - } - - op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op); - - stonith_op = lookup_singleton(data_set, op_key); - if(stonith_op == NULL) { - stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set); - - add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname); - add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id); - add_hash_param(stonith_op->meta, "stonith_action", op); - - if (pe__is_guest_or_remote_node(node) - && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) { - /* Extra work to detect device changes on remotes - * - * We may do this for all nodes in the future, but for now - * the pcmk__check_action_config() based stuff works fine. 
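         * The digests gathered below are stored in the fencing action's
         * meta-attributes as comma-separated "<rsc-id>:<agent>:<digest>"
         * entries, one per unfencing-capable device, which is what makes
         * the change detection possible.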
- */ - long max = 1024; - long digests_all_offset = 0; - long digests_secure_offset = 0; - - char *digests_all = calloc(max, sizeof(char)); - char *digests_secure = calloc(max, sizeof(char)); - GList *matches = find_unfencing_devices(data_set->resources, NULL); - - for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) { - pe_resource_t *match = gIter->data; - const char *agent = g_hash_table_lookup(match->meta, - XML_ATTR_TYPE); - op_digest_cache_t *data = NULL; - - data = pe__compare_fencing_digest(match, agent, node, data_set); - if(data->rc == RSC_DIGEST_ALL) { - optional = FALSE; - crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id); - if (!pcmk__is_daemon && data_set->priv != NULL) { - pcmk__output_t *out = data_set->priv; - out->info(out, "notice: Unfencing %s (remote): because the definition of %s changed", - node->details->uname, match->id); - } - } - - digests_all_offset += snprintf( - digests_all+digests_all_offset, max-digests_all_offset, - "%s:%s:%s,", match->id, agent, data->digest_all_calc); - - digests_secure_offset += snprintf( - digests_secure+digests_secure_offset, max-digests_secure_offset, - "%s:%s:%s,", match->id, agent, data->digest_secure_calc); - } - g_hash_table_insert(stonith_op->meta, - strdup(XML_OP_ATTR_DIGESTS_ALL), - digests_all); - g_hash_table_insert(stonith_op->meta, - strdup(XML_OP_ATTR_DIGESTS_SECURE), - digests_secure); - } - - } else { - free(op_key); - } - - if (data_set->priority_fencing_delay > 0 - - /* It's a suitable case where `priority-fencing-delay` applies. - * At least add `priority-fencing-delay` field as an indicator. */ - && (priority_delay - - /* The priority delay needs to be recalculated if this function has - * been called by schedule_fencing_and_shutdowns() after node - * priority has already been calculated by native_add_running(). - */ - || g_hash_table_lookup(stonith_op->meta, - XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) { - - /* Add `priority-fencing-delay` to the fencing op even if it's 0 for - * the targeting node. So that it takes precedence over any possible - * `pcmk_delay_base/max`. 
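     * (Editorial note: node_priority_fencing_delay() returns a nonzero delay
     * only when the target is an offline full cluster member holding the
     * highest node priority, node priorities are not all equal, and at most
     * half of the members are still online; otherwise 0 is stored here.)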
- */ - char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set)); - - g_hash_table_insert(stonith_op->meta, - strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY), - delay_s); - } - - if(optional == FALSE && pe_can_fence(data_set, node)) { - pe__clear_action_flags(stonith_op, pe_action_optional); - pe_action_set_reason(stonith_op, reason, false); - - } else if(reason && stonith_op->reason == NULL) { - stonith_op->reason = strdup(reason); - } - - return stonith_op; -} - void trigger_unfencing( pe_resource_t * rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t * data_set) { if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) { /* No resources require it */ return; } else if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) { /* Wasn't a stonith device */ return; } else if(node && node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set); if(dependency) { order_actions(unfence, dependency, pe_order_optional); } } else if(rsc) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) { trigger_unfencing(rsc, node, reason, dependency, data_set); } } } } gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref) { pe_tag_t *tag = NULL; GList *gIter = NULL; gboolean is_existing = FALSE; CRM_CHECK(tags && tag_name && obj_ref, return FALSE); tag = g_hash_table_lookup(tags, tag_name); if (tag == NULL) { tag = calloc(1, sizeof(pe_tag_t)); if (tag == NULL) { return FALSE; } tag->id = strdup(tag_name); tag->refs = NULL; g_hash_table_insert(tags, strdup(tag_name), tag); } for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) { const char *existing_ref = (const char *) gIter->data; if (pcmk__str_eq(existing_ref, obj_ref, pcmk__str_none)){ is_existing = TRUE; break; } } if (is_existing == FALSE) { tag->refs = g_list_append(tag->refs, strdup(obj_ref)); crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref); } return TRUE; } -/*! - * \internal - * \brief Create an action reason string based on the action itself - * - * \param[in] action Action to create reason string for - * \param[in] flag Action flag that was cleared - * - * \return Newly allocated string suitable for use as action reason - * \note It is the caller's responsibility to free() the result. - */ -char * -pe__action2reason(pe_action_t *action, enum pe_action_flags flag) -{ - const char *change = NULL; - - switch (flag) { - case pe_action_runnable: - case pe_action_migrate_runnable: - change = "unrunnable"; - break; - case pe_action_optional: - change = "required"; - break; - default: - // Bug: caller passed unsupported flag - CRM_CHECK(change != NULL, change = ""); - break; - } - return crm_strdup_printf("%s%s%s %s", change, - (action->rsc == NULL)? "" : " ", - (action->rsc == NULL)? 
"" : action->rsc->id, - action->task); -} - -void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite) -{ - if (action->reason != NULL && overwrite) { - pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'", - action->uuid, action->reason, pcmk__s(reason, "(none)")); - } else if (action->reason == NULL) { - pe_rsc_trace(action->rsc, "Set %s reason to '%s'", - action->uuid, pcmk__s(reason, "(none)")); - } else { - // crm_assert(action->reason != NULL && !overwrite); - return; - } - - pcmk__str_update(&action->reason, reason); -} - /*! * \internal * \brief Check whether shutdown has been requested for a node * * \param[in] node Node to check * * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise * \note This differs from simply using node->details->shutdown in that it can * be used before that has been determined (and in fact to determine it), * and it can also be used to distinguish requested shutdown from implicit * shutdown of remote nodes by virtue of their connection stopping. */ bool pe__shutdown_requested(pe_node_t *node) { const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN); return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches); } /*! * \internal * \brief Update a data set's "recheck by" time * * \param[in] recheck Epoch time when recheck should happen * \param[in,out] data_set Current working set */ void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set) { if ((recheck > get_effective_time(data_set)) && ((data_set->recheck_by == 0) || (data_set->recheck_by > recheck))) { data_set->recheck_by = recheck; } } /*! * \internal * \brief Wrapper for pe_unpack_nvpairs() using a cluster working set */ void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, pe_rule_eval_data_t *rule_data, GHashTable *hash, const char *always_first, gboolean overwrite, pe_working_set_t *data_set) { crm_time_t *next_change = crm_time_new_undefined(); pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash, always_first, overwrite, next_change); if (crm_time_is_defined(next_change)) { time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); pe__update_recheck_time(recheck, data_set); } crm_time_free(next_change); } bool pe__resource_is_disabled(pe_resource_t *rsc) { const char *target_role = NULL; CRM_CHECK(rsc != NULL, return false); target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE); if (target_role) { enum rsc_role_e target_role_e = text2role(target_role); if ((target_role_e == RSC_ROLE_STOPPED) || ((target_role_e == RSC_ROLE_UNPROMOTED) && pcmk_is_set(uber_parent(rsc)->flags, pe_rsc_promotable))) { return true; } } return false; } -/*! 
- * \internal - * \brief Create an action to clear a resource's history from CIB - * - * \param[in] rsc Resource to clear - * \param[in] node Node to clear history on - * - * \return New action to clear resource history - */ -pe_action_t * -pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node, - pe_working_set_t *data_set) -{ - char *key = NULL; - - CRM_ASSERT(rsc && node); - key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0); - return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE, - data_set); -} - bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list) { for (GList *ele = rsc->running_on; ele; ele = ele->next) { pe_node_t *node = (pe_node_t *) ele->data; if (pcmk__str_in_list(node->details->uname, node_list, pcmk__str_star_matches|pcmk__str_casei)) { return true; } } return false; } bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node) { return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node)); } GList * pe__filter_rsc_list(GList *rscs, GList *filter) { GList *retval = NULL; for (GList *gIter = rscs; gIter; gIter = gIter->next) { pe_resource_t *rsc = (pe_resource_t *) gIter->data; /* I think the second condition is safe here for all callers of this * function. If not, it needs to move into pe__node_text. */ if (pcmk__str_in_list(rsc_printable_id(rsc), filter, pcmk__str_star_matches) || (rsc->parent && pcmk__str_in_list(rsc_printable_id(rsc->parent), filter, pcmk__str_star_matches))) { retval = g_list_prepend(retval, rsc); } } return retval; } GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s) { GList *nodes = NULL; if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { /* Nothing was given so return a list of all node names. Or, '*' was * given. This would normally fall into the pe__unames_with_tag branch * where it will return an empty list. Catch it here instead. */ nodes = g_list_prepend(nodes, strdup("*")); } else { pe_node_t *node = pe_find_node(data_set->nodes, s); if (node) { /* The given string was a valid uname for a node. Return a * singleton list containing just that uname. */ nodes = g_list_prepend(nodes, strdup(s)); } else { /* The given string was not a valid uname. It's either a tag or * it's a typo or something. In the first case, we'll return a * list of all the unames of the nodes with the given tag. In the * second case, we'll return a NULL pointer and nothing will * get displayed. */ nodes = pe__unames_with_tag(data_set, s); } } return nodes; } GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s) { GList *resources = NULL; if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) { resources = g_list_prepend(resources, strdup("*")); } else { pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s, pe_find_renamed|pe_find_any); if (rsc) { /* A colon in the name we were given means we're being asked to filter * on a specific instance of a cloned resource. Put that exact string * into the filter list. Otherwise, use the printable ID of whatever * resource was found that matches what was asked for. */ if (strstr(s, ":") != NULL) { resources = g_list_prepend(resources, strdup(rsc->id)); } else { resources = g_list_prepend(resources, strdup(rsc_printable_id(rsc))); } } else { /* The given string was not a valid resource name. It's either * a tag or it's a typo or something. See build_uname_list for * more detail. 
*/ resources = pe__rscs_with_tag(data_set, s); } } return resources; } xmlNode * pe__failed_probe_for_rsc(pe_resource_t *rsc, const char *name) { pe_resource_t *parent = uber_parent(rsc); const char *rsc_id = rsc->id; if (rsc->variant == pe_clone) { rsc_id = pe__clone_child_id(rsc); } else if (parent->variant == pe_clone) { rsc_id = pe__clone_child_id(parent); } for (xmlNode *xml_op = pcmk__xml_first_child(rsc->cluster->failed); xml_op != NULL; xml_op = pcmk__xml_next(xml_op)) { const char *value = NULL; char *op_id = NULL; /* This resource operation is not a failed probe. */ if (!pcmk_xe_mask_probe_failure(xml_op)) { continue; } /* This resource operation was not run on the given node. Note that if name is * NULL, this will always succeed. */ value = crm_element_value(xml_op, XML_LRM_ATTR_TARGET); if (value == NULL || !pcmk__str_eq(value, name, pcmk__str_casei|pcmk__str_null_matches)) { continue; } /* This resource operation has no operation_key. */ value = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY); if (!parse_op_key(value ? value : ID(xml_op), &op_id, NULL, NULL)) { continue; } /* This resource operation's ID does not match the rsc_id we are looking for. */ if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) { free(op_id); continue; } free(op_id); return xml_op; } return NULL; }
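/* Editorial sketch (assumption, not part of this patch): one way a caller
 * might use pe__failed_probe_for_rsc() above to check whether a failed probe
 * of a resource is recorded for a given node. The helper name is
 * hypothetical.
 */
static bool
rsc_has_failed_probe_on(pe_resource_t *rsc, const pe_node_t *node)
{
    return pe__failed_probe_for_rsc(rsc, node->details->uname) != NULL;
}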