diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index cda3e71293..e887870ec7 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,686 +1,686 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_INTERNAL__H
# define PE_INTERNAL__H
# include <stdbool.h>
# include <stdint.h>
# include <string.h>
# include <crm/msg_xml.h>
# include <crm/pengine/status.h>
# include <crm/pengine/remote_internal.h>
# include <crm/common/internal.h>
# include <crm/common/options_internal.h>
# include <crm/common/output_internal.h>
# include <crm/common/scheduler_internal.h>
const char *pe__resource_description(const pcmk_resource_t *rsc,
uint32_t show_opts);
bool pe__clone_is_ordered(const pcmk_resource_t *clone);
int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag);
bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags);
bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags);
pcmk_resource_t *pe__last_group_member(const pcmk_resource_t *group);
# define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_err(fmt...) do { \
was_processing_error = TRUE; \
pcmk__config_err(fmt); \
} while (0)
# define pe_warn(fmt...) do { \
was_processing_warning = TRUE; \
pcmk__config_warn(fmt); \
} while (0)
# define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
# define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
#define pe__set_working_set_flags(scheduler, flags_to_set) do { \
(scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", crm_system_name, \
(scheduler)->flags, (flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_working_set_flags(scheduler, flags_to_clear) do { \
(scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", crm_system_name, \
(scheduler)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_resource_flags(resource, flags_to_set) do { \
(resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_resource_flags(resource, flags_to_clear) do { \
(resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_action_flags(action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags(action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
action_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action", action_name, \
(action_flags), \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
action_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", action_name, \
(action_flags), \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_action_flags_as(function, line, action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_order_flags(order_flags, flags_to_set) do { \
order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_order_flags(order_flags, flags_to_clear) do { \
order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (!pcmk_is_set(pcmk__warnings, pe_wo_bit)) { \
if (pe_wo_bit == pcmk__wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
pcmk__warnings = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Warn-once", "logging", \
pcmk__warnings, \
(pe_wo_bit), #pe_wo_bit); \
} \
} while (0);
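/* A minimal usage sketch of the flag helpers above (illustrative only; the
 * wrapper function and its logic are hypothetical). pcmk_rsc_managed,
 * pcmk_action_runnable, and pcmk__wo_blind are names that appear elsewhere
 * in this patch. Each macro logs the change at trace level via
 * pcmk__set_flags_as()/pcmk__clear_flags_as().
 */
#if 0
static void
example_flag_usage(pcmk_resource_t *rsc, pcmk_action_t *action)
{
    // Mark a resource as unmanaged
    pe__clear_resource_flags(rsc, pcmk_rsc_managed);

    // Mark an action as unrunnable
    pe__clear_action_flags(action, pcmk_action_runnable);

    // Warn about a problematic setting, but only once per run
    pe_warn_once(pcmk__wo_blind, "Support for this setting is deprecated");
}
#endif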
typedef struct pe__location_constraint_s {
char *id; // Constraint XML ID
pcmk_resource_t *rsc_lh; // Resource being located
enum rsc_role_e role_filter; // Role to locate
enum pe_discover_e discover_mode; // Resource discovery
GList *node_list_rh; // List of pcmk_node_t*
} pe__location_t;
typedef struct pe__order_constraint_s {
int id;
uint32_t flags; // Group of enum pcmk__action_relation_flags
void *lh_opaque;
pcmk_resource_t *lh_rsc;
pcmk_action_t *lh_action;
char *lh_action_task;
void *rh_opaque;
pcmk_resource_t *rh_rsc;
pcmk_action_t *rh_action;
char *rh_action_task;
} pe__ordering_t;
const pcmk_resource_t *pe__const_top_resource(const pcmk_resource_t *rsc,
bool include_bundle);
int pe__clone_max(const pcmk_resource_t *clone);
int pe__clone_node_max(const pcmk_resource_t *clone);
int pe__clone_promoted_max(const pcmk_resource_t *clone);
int pe__clone_promoted_node_max(const pcmk_resource_t *clone);
void pe__create_clone_notifications(pcmk_resource_t *clone);
void pe__free_clone_notification_data(pcmk_resource_t *clone);
void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone,
pcmk_action_t *start,
pcmk_action_t *started,
pcmk_action_t *stop,
pcmk_action_t *stopped);
pcmk_action_t *pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task,
bool optional, bool runnable);
void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone,
bool any_promoting, bool any_demoting);
bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node);
void add_hash_param(GHashTable * hash, const char *name, const char *value);
char *native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
const char *name, pcmk_scheduler_t *scheduler);
pcmk_node_t *native_location(const pcmk_resource_t *rsc, GList **list,
int current);
void pe_metadata(pcmk__output_t *out);
void verify_pe_options(GHashTable * options);
void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_scheduler_t *scheduler, gboolean failed);
gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
pcmk_resource_t *native_find_rsc(pcmk_resource_t *rsc, const char *id,
const pcmk_node_t *node, int flags);
gboolean native_active(pcmk_resource_t *rsc, gboolean all);
gboolean group_active(pcmk_resource_t *rsc, gboolean all);
gboolean clone_active(pcmk_resource_t *rsc, gboolean all);
gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all);
//! \deprecated This function will be removed in a future release
void native_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void group_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void clone_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
gchar *pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...);
char *pe__node_display_name(pcmk_node_t *node, bool print_detail);
// Clone notifications (pe_notif.c)
void pe__order_notifs_after_fencing(const pcmk_action_t *action,
pcmk_resource_t *rsc,
pcmk_action_t *stonith_op);
static inline const char *
pe__rsc_bool_str(const pcmk_resource_t *rsc, uint64_t rsc_flag)
{
return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
}
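/* A minimal sketch of how pe__rsc_bool_str() might be used (illustrative
 * only): it maps a flag check to a true/false string, which is convenient in
 * log messages. pcmk_rsc_managed is a flag referenced elsewhere in this patch.
 */
#if 0
crm_trace("Resource %s is managed: %s",
          rsc->id, pe__rsc_bool_str(rsc, pcmk_rsc_managed));
#endif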
int pe__clone_xml(pcmk__output_t *out, va_list args);
int pe__clone_default(pcmk__output_t *out, va_list args);
int pe__group_xml(pcmk__output_t *out, va_list args);
int pe__group_default(pcmk__output_t *out, va_list args);
int pe__bundle_xml(pcmk__output_t *out, va_list args);
int pe__bundle_html(pcmk__output_t *out, va_list args);
int pe__bundle_text(pcmk__output_t *out, va_list args);
int pe__node_html(pcmk__output_t *out, va_list args);
int pe__node_text(pcmk__output_t *out, va_list args);
int pe__node_xml(pcmk__output_t *out, va_list args);
int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
void native_free(pcmk_resource_t *rsc);
void group_free(pcmk_resource_t *rsc);
void clone_free(pcmk_resource_t *rsc);
void pe__free_bundle(pcmk_resource_t *rsc);
enum rsc_role_e native_resource_state(const pcmk_resource_t *rsc,
gboolean current);
enum rsc_role_e group_resource_state(const pcmk_resource_t *rsc,
gboolean current);
enum rsc_role_e clone_resource_state(const pcmk_resource_t *rsc,
gboolean current);
enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc,
gboolean current);
void pe__count_common(pcmk_resource_t *rsc);
void pe__count_bundle(pcmk_resource_t *rsc);
void common_free(pcmk_resource_t *rsc);
pcmk_node_t *pe__copy_node(const pcmk_node_t *this_node);
time_t get_effective_time(pcmk_scheduler_t *scheduler);
/* Failure handling utilities (from failcounts.c) */
int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc,
time_t *last_failure, uint32_t flags,
const xmlNode *xml_op);
pcmk_action_t *pe__clear_failcount(pcmk_resource_t *rsc,
const pcmk_node_t *node, const char *reason,
pcmk_scheduler_t *scheduler);
/* Functions for finding/counting a resource's active nodes */
bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_node_t **active, unsigned int *count_all,
unsigned int *count_clean);
pcmk_node_t *pe__find_active_requires(const pcmk_resource_t *rsc,
unsigned int *count);
static inline pcmk_node_t *
pe__current_node(const pcmk_resource_t *rsc)
{
return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL);
}
/* Binary like operators for lists of nodes */
GHashTable *pe__node_list2table(const GList *list);
pcmk_action_t *get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler);
gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
uint32_t flags);
void pe__show_node_scores_as(const char *file, const char *function,
int line, bool to_log, const pcmk_resource_t *rsc,
const char *comment, GHashTable *nodes,
pcmk_scheduler_t *scheduler);
#define pe__show_node_scores(level, rsc, text, nodes, scheduler) \
pe__show_node_scores_as(__FILE__, __func__, __LINE__, \
(level), (rsc), (text), (nodes), (scheduler))
-xmlNode *find_rsc_op_entry(const pcmk_resource_t *rsc, const char *key);
-
GHashTable *pcmk__unpack_action_meta(pcmk_resource_t *rsc,
const pcmk_node_t *node,
const char *action_name, guint interval_ms,
const xmlNode *action_config);
GHashTable *pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
GHashTable *node_attrs,
pcmk_scheduler_t *data_set);
+xmlNode *pcmk__find_action_config(const pcmk_resource_t *rsc, const char *key,
+ bool include_disabled);
pcmk_action_t *custom_action(pcmk_resource_t *rsc, char *key, const char *task,
const pcmk_node_t *on_node, gboolean optional,
gboolean foo, pcmk_scheduler_t *scheduler);
# define delete_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DELETE, 0)
# define delete_action(rsc, node, optional) custom_action( \
rsc, delete_key(rsc), PCMK_ACTION_DELETE, node, \
optional, TRUE, rsc->cluster);
# define stop_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0)
# define stop_action(rsc, node, optional) custom_action( \
rsc, stop_key(rsc), PCMK_ACTION_STOP, node, \
optional, TRUE, rsc->cluster);
# define reload_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_RELOAD_AGENT, 0)
# define start_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_START, 0)
# define start_action(rsc, node, optional) custom_action( \
rsc, start_key(rsc), PCMK_ACTION_START, node, \
optional, TRUE, rsc->cluster)
# define promote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0)
# define promote_action(rsc, node, optional) custom_action( \
rsc, promote_key(rsc), PCMK_ACTION_PROMOTE, node, \
optional, TRUE, rsc->cluster)
# define demote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0)
# define demote_action(rsc, node, optional) custom_action( \
rsc, demote_key(rsc), PCMK_ACTION_DEMOTE, node, \
optional, TRUE, rsc->cluster)
extern int pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
pcmk_scheduler_t *scheduler);
pcmk_action_t *find_first_action(const GList *input, const char *uuid,
const char *task, const pcmk_node_t *on_node);
enum action_tasks get_complex_task(const pcmk_resource_t *rsc,
const char *name);
GList *find_actions(GList *input, const char *key, const pcmk_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
const pcmk_node_t *on_node);
GList *pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node);
extern void pe_free_action(pcmk_action_t *action);
void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
const char *tag, pcmk_scheduler_t *scheduler);
extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role);
void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role,
const char *why);
pcmk_resource_t *find_clone_instance(const pcmk_resource_t *rsc,
const char *sub_id);
extern void destroy_ticket(gpointer data);
pe_ticket_t *ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler);
// Functions for manipulating resource names
const char *pe_base_name_end(const char *id);
char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
pe_base_name_eq(const pcmk_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
}
return false;
}
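/* A worked example of the name helpers above (illustrative only; "myclone:2"
 * is a hypothetical clone instance ID). Clone instances carry a numeric
 * suffix on their ID, so for a resource whose ID is "myclone:2":
 *   - pe_base_name_end() points at the last character of "myclone"
 *   - clone_strip() returns a newly allocated "myclone"
 *   - clone_zero() returns a newly allocated "myclone:0"
 *   - pe_base_name_eq(rsc, "myclone") returns true
 */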
int pe__target_rc_from_xml(const xmlNode *xml_op);
gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any);
typedef struct op_digest_cache_s {
enum pcmk__digest_result rc;
xmlNode *params_all;
xmlNode *params_secure;
xmlNode *params_restart;
char *digest_all_calc;
char *digest_secure_calc;
char *digest_restart_calc;
} op_digest_cache_t;
op_digest_cache_t *pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
guint *interval_ms,
const pcmk_node_t *node,
const xmlNode *xml_op,
GHashTable *overrides,
bool calc_secure,
pcmk_scheduler_t *scheduler);
void pe__free_digests(gpointer ptr);
op_digest_cache_t *rsc_action_digest_cmp(pcmk_resource_t *rsc,
const xmlNode *xml_op,
pcmk_node_t *node,
pcmk_scheduler_t *scheduler);
pcmk_action_t *pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay,
pcmk_scheduler_t *scheduler);
void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node,
const char *reason, pcmk_action_t *dependency,
pcmk_scheduler_t *scheduler);
char *pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag);
void pe_action_set_reason(pcmk_action_t *action, const char *reason,
bool overwrite);
void pe__add_action_expected_result(pcmk_action_t *action, int expected_result);
void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler,
uint64_t flag);
gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);
//! \deprecated This function will be removed in a future release
void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
void * print_data, gboolean print_all);
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay);
pcmk_node_t *pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pcmk_scheduler_t *scheduler);
//! \deprecated This function will be removed in a future release
void common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name,
const pcmk_node_t *node, long options, void *print_data);
int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
unsigned int options);
int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
unsigned int options);
//! A single instance of a bundle
typedef struct {
int offset; //!< 0-origin index of this instance in bundle
char *ipaddr; //!< IP address associated with this instance
pcmk_node_t *node; //!< Node created for this instance
pcmk_resource_t *ip; //!< IP address resource for ipaddr
pcmk_resource_t *child; //!< Instance of bundled resource
pcmk_resource_t *container; //!< Container associated with this instance
pcmk_resource_t *remote; //!< Pacemaker Remote connection into container
} pe__bundle_replica_t;
GList *pe__bundle_containers(const pcmk_resource_t *bundle);
int pe__bundle_max(const pcmk_resource_t *rsc);
bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
const pcmk_node_t *node);
pcmk_resource_t *pe__bundled_resource(const pcmk_resource_t *rsc);
const pcmk_resource_t *pe__get_rsc_in_container(const pcmk_resource_t *instance);
pcmk_resource_t *pe__first_container(const pcmk_resource_t *bundle);
void pe__foreach_bundle_replica(pcmk_resource_t *bundle,
bool (*fn)(pe__bundle_replica_t *, void *),
void *user_data);
void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
bool (*fn)(const pe__bundle_replica_t *,
void *),
void *user_data);
pcmk_resource_t *pe__find_bundle_replica(const pcmk_resource_t *bundle,
const pcmk_node_t *node);
bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc);
const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc,
pcmk_scheduler_t *scheduler,
xmlNode *xml, const char *field);
const char *pe__node_attribute_calculated(const pcmk_node_t *node,
const char *name,
const pcmk_resource_t *rsc,
enum pcmk__rsc_node node_type,
bool force_host);
const char *pe_node_attribute_raw(const pcmk_node_t *node, const char *name);
bool pe__is_universal_clone(const pcmk_resource_t *rsc,
const pcmk_scheduler_t *scheduler);
void pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
pcmk_node_t *node, enum pcmk__check_parameters,
pcmk_scheduler_t *scheduler);
void pe__foreach_param_check(pcmk_scheduler_t *scheduler,
void (*cb)(pcmk_resource_t*, pcmk_node_t*,
const xmlNode*,
enum pcmk__check_parameters));
void pe__free_param_checks(pcmk_scheduler_t *scheduler);
bool pe__shutdown_requested(const pcmk_node_t *node);
void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler);
/*!
* \internal
* \brief Register xml formatting message functions.
*
* \param[in,out] out Output object to register messages with
*/
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
gboolean overwrite,
pcmk_scheduler_t *scheduler);
bool pe__resource_is_disabled(const pcmk_resource_t *rsc);
void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node);
GList *pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
GList *pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
bool pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc,
const char *tag);
bool pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node,
const char *tag);
bool pe__rsc_running_on_only(const pcmk_resource_t *rsc,
const pcmk_node_t *node);
bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
GList * pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s);
GList * pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s);
bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node);
gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
xmlNode *pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name);
const char *pe__clone_child_id(const pcmk_resource_t *rsc);
int pe__sum_node_health_scores(const pcmk_node_t *node, int base_health);
int pe__node_health(pcmk_node_t *node);
static inline enum pcmk__health_strategy
pe__health_strategy(pcmk_scheduler_t *scheduler)
{
return pcmk__parse_health_strategy(pe_pref(scheduler->config_hash,
PCMK__OPT_NODE_HEALTH_STRATEGY));
}
static inline int
pe__health_score(const char *option, pcmk_scheduler_t *scheduler)
{
return char2score(pe_pref(scheduler->config_hash, option));
}
/*!
* \internal
* \brief Return a string suitable for logging as a node name
*
* \param[in] node Node to return a node name string for
*
* \return Node name if available, otherwise node ID if available,
* otherwise "unspecified node" if node is NULL or "unidentified node"
* if node has neither a name nor ID.
*/
static inline const char *
pe__node_name(const pcmk_node_t *node)
{
if (node == NULL) {
return "unspecified node";
} else if (node->details->uname != NULL) {
return node->details->uname;
} else if (node->details->id != NULL) {
return node->details->id;
} else {
return "unidentified node";
}
}
/*!
* \internal
* \brief Check whether two node objects refer to the same node
*
* \param[in] node1 First node object to compare
* \param[in] node2 Second node object to compare
*
* \return true if \p node1 and \p node2 refer to the same node
*/
static inline bool
pe__same_node(const pcmk_node_t *node1, const pcmk_node_t *node2)
{
return (node1 != NULL) && (node2 != NULL)
&& (node1->details == node2->details);
}
/*!
* \internal
* \brief Get the operation key from an action history entry
*
* \param[in] xml Action history entry
*
* \return Entry's operation key
*/
static inline const char *
pe__xe_history_key(const xmlNode *xml)
{
if (xml == NULL) {
return NULL;
} else {
/* @COMPAT Pacemaker <= 1.1.5 did not add the key, and used the ID
* instead. Checking for that allows us to process old saved CIBs,
* including some regression tests.
*/
const char *key = crm_element_value(xml, XML_LRM_ATTR_TASK_KEY);
return pcmk__str_empty(key)? ID(xml) : key;
}
}
#endif
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index 1d302cc1e0..1f7ab07ab4 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -1,1942 +1,1943 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <sys/param.h>
#include <glib.h>
#include <crm/lrmd_internal.h>
#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Get the action flags relevant to ordering constraints
*
* \param[in,out] action Action to check
* \param[in] node Node that *other* action in the ordering is on
* (used only for clone resource actions)
*
* \return Action flags that should be used for orderings
*/
static uint32_t
action_flags_for_ordering(pcmk_action_t *action, const pcmk_node_t *node)
{
bool runnable = false;
uint32_t flags;
// For non-resource actions, return the action flags
if (action->rsc == NULL) {
return action->flags;
}
/* For non-clone resources, or a clone action not assigned to a node,
* return the flags as determined by the resource method without a node
* specified.
*/
flags = action->rsc->cmds->action_flags(action, NULL);
if ((node == NULL) || !pe_rsc_is_clone(action->rsc)) {
return flags;
}
/* Otherwise (i.e., for clone resource actions on a specific node), first
* remember whether the non-node-specific action is runnable.
*/
runnable = pcmk_is_set(flags, pcmk_action_runnable);
// Then recheck the resource method with the node
flags = action->rsc->cmds->action_flags(action, node);
/* For clones in ordering constraints, the node-specific "runnable" doesn't
* matter, just the non-node-specific setting (i.e., is the action runnable
* anywhere).
*
* This applies only to runnable, and only for ordering constraints. This
* function shouldn't be used for other types of constraints without
* changes. Not very satisfying, but it's logical and appears to work well.
*/
if (runnable && !pcmk_is_set(flags, pcmk_action_runnable)) {
pe__set_raw_action_flags(flags, action->rsc->id, pcmk_action_runnable);
}
return flags;
}
/*!
* \internal
* \brief Get action UUID that should be used with a resource ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the UUID and resource of the first action in an
* ordering, this returns the UUID of the action that should actually be used
* for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0").
*
* \param[in] first_uuid UUID of first action in ordering
* \param[in] first_rsc Resource of first action in ordering
*
* \return Newly allocated copy of UUID to use with ordering
* \note It is the caller's responsibility to free the return value.
*/
static char *
action_uuid_for_ordering(const char *first_uuid,
const pcmk_resource_t *first_rsc)
{
guint interval_ms = 0;
char *uuid = NULL;
char *rid = NULL;
char *first_task_str = NULL;
enum action_tasks first_task = pcmk_action_unspecified;
enum action_tasks remapped_task = pcmk_action_unspecified;
// Only non-notify actions for collective resources need remapping
if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
|| (first_rsc->variant < pcmk_rsc_variant_group)) {
goto done;
}
// Only non-recurring actions need remapping
CRM_ASSERT(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms));
if (interval_ms > 0) {
goto done;
}
first_task = text2task(first_task_str);
switch (first_task) {
case pcmk_action_stop:
case pcmk_action_start:
case pcmk_action_notify:
case pcmk_action_promote:
case pcmk_action_demote:
remapped_task = first_task + 1;
break;
case pcmk_action_stopped:
case pcmk_action_started:
case pcmk_action_notified:
case pcmk_action_promoted:
case pcmk_action_demoted:
remapped_task = first_task;
break;
case pcmk_action_monitor:
case pcmk_action_shutdown:
case pcmk_action_fence:
break;
default:
crm_err("Unknown action '%s' in ordering", first_task_str);
break;
}
if (remapped_task != pcmk_action_unspecified) {
/* If a clone or bundle has notifications enabled, the ordering will be
* relative to when notifications have been sent for the remapped task.
*/
if (pcmk_is_set(first_rsc->flags, pcmk_rsc_notify)
&& (pe_rsc_is_clone(first_rsc) || pe_rsc_is_bundled(first_rsc))) {
uuid = pcmk__notify_key(rid, "confirmed-post",
task2text(remapped_task));
} else {
uuid = pcmk__op_key(rid, task2text(remapped_task), 0);
}
pe_rsc_trace(first_rsc,
"Remapped action UUID %s to %s for ordering purposes",
first_uuid, uuid);
}
done:
if (uuid == NULL) {
uuid = strdup(first_uuid);
CRM_ASSERT(uuid != NULL);
}
free(first_task_str);
free(rid);
return uuid;
}
/*!
* \internal
* \brief Get actual action that should be used with an ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the first action in an ordering, this returns
* the action that should actually be used for ordering (for example, the
* started action instead of the start action).
*
* \param[in] action First action in an ordering
*
* \return Actual action that should be used for the ordering
*/
static pcmk_action_t *
action_for_ordering(pcmk_action_t *action)
{
pcmk_action_t *result = action;
pcmk_resource_t *rsc = action->rsc;
if ((rsc != NULL) && (rsc->variant >= pcmk_rsc_variant_group)
&& (action->uuid != NULL)) {
char *uuid = action_uuid_for_ordering(action->uuid, rsc);
result = find_first_action(rsc->actions, uuid, NULL, NULL);
if (result == NULL) {
crm_warn("Not remapping %s to %s because %s does not have "
"remapped action", action->uuid, uuid, rsc->id);
result = action;
}
free(uuid);
}
return result;
}
/*!
* \internal
* \brief Wrapper for update_ordered_actions() method for readability
*
* \param[in,out] rsc Resource to call method for
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this
* node (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates
* (may include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static inline uint32_t
update(pcmk_resource_t *rsc, pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
return rsc->cmds->update_ordered_actions(first, then, node, flags, filter,
type, scheduler);
}
/*!
* \internal
* \brief Update flags for ordering's actions appropriately for ordering's flags
*
* \param[in,out] first First action in an ordering
* \param[in,out] then Then action in an ordering
* \param[in] first_flags Action flags for \p first for ordering purposes
* \param[in] then_flags Action flags for \p then for ordering purposes
* \param[in,out] order Action wrapper for \p first in ordering
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags
*/
static uint32_t
update_action_for_ordering_flags(pcmk_action_t *first, pcmk_action_t *then,
uint32_t first_flags, uint32_t then_flags,
pe_action_wrapper_t *order,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
/* The node will only be used for clones. If interleaved, node will be NULL,
* otherwise the ordering scope will be limited to the node. Normally, the
* whole 'then' clone should restart if 'first' is restarted, so then->node
* is needed.
*/
pcmk_node_t *node = then->node;
if (pcmk_is_set(order->type, pcmk__ar_first_implies_same_node_then)) {
/* For unfencing, only instances of 'then' on the same node as 'first'
* (the unfencing operation) should restart, so reset node to
* first->node, at which point this case is handled like a normal
* pcmk__ar_first_implies_then.
*/
pe__clear_order_flags(order->type,
pcmk__ar_first_implies_same_node_then);
pe__set_order_flags(order->type, pcmk__ar_first_implies_then);
node = first->node;
pe_rsc_trace(then->rsc,
"%s then %s: mapped pcmk__ar_first_implies_same_node_then "
"to pcmk__ar_first_implies_then on %s",
first->uuid, then->uuid, pe__node_name(node));
}
if (pcmk_is_set(order->type, pcmk__ar_first_implies_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk_action_optional,
pcmk_action_optional, pcmk__ar_first_implies_then,
scheduler);
} else if (!pcmk_is_set(first_flags, pcmk_action_optional)
&& pcmk_is_set(then->flags, pcmk_action_optional)) {
pe__clear_action_flags(then, pcmk_action_optional);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pe_rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_first_implies_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_intermediate_stop)
&& (then->rsc != NULL)) {
enum pe_action_flags restart = pcmk_action_optional
|pcmk_action_runnable;
changed |= update(then->rsc, first, then, node, first_flags, restart,
pcmk__ar_intermediate_stop, scheduler);
pe_rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_intermediate_stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_then_implies_first)) {
if (first->rsc != NULL) {
changed |= update(first->rsc, first, then, node, first_flags,
pcmk_action_optional, pcmk__ar_then_implies_first,
scheduler);
} else if (!pcmk_is_set(first_flags, pcmk_action_optional)
&& pcmk_is_set(first->flags, pcmk_action_runnable)) {
pe__clear_action_flags(first, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
}
pe_rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_promoted_then_implies_first)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk_action_optional,
pcmk_action_optional,
pcmk__ar_promoted_then_implies_first, scheduler);
}
pe_rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_promoted_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_min_runnable)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable, pcmk__ar_min_runnable,
scheduler);
} else if (pcmk_is_set(first_flags, pcmk_action_runnable)) {
// We have another runnable instance of "first"
then->runnable_before++;
/* Mark "then" as runnable if it requires a certain number of
* "before" instances to be runnable, and they now are.
*/
if ((then->runnable_before >= then->required_runnable_before)
&& !pcmk_is_set(then->flags, pcmk_action_runnable)) {
pe__set_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
}
pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_min_runnable",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_nested_remote_probe)
&& (then->rsc != NULL)) {
if (!pcmk_is_set(first_flags, pcmk_action_runnable)
&& (first->rsc->running_on != NULL)) {
pe_rsc_trace(then->rsc,
"%s then %s: ignoring because first is stopping",
first->uuid, then->uuid);
order->type = (enum pe_ordering) pcmk__ar_none;
} else {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
}
pe_rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_nested_remote_probe",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_unrunnable_first_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk_action_runnable)
&& pcmk_is_set(then->flags, pcmk_action_runnable)) {
pe__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pe_rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_unrunnable_first_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_unmigratable_then_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_optional,
pcmk__ar_unmigratable_then_blocks, scheduler);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after "
"pcmk__ar_unmigratable_then_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_first_else_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_optional, pcmk__ar_first_else_then,
scheduler);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_first_else_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_ordered)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable, pcmk__ar_ordered,
scheduler);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_ordered",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->type, pcmk__ar_asymmetric)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk_action_runnable, pcmk__ar_asymmetric,
scheduler);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_asymmetric",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(first->flags, pcmk_action_runnable)
&& pcmk_is_set(order->type, pcmk__ar_first_implies_then_graphed)
&& !pcmk_is_set(first_flags, pcmk_action_optional)) {
pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
then->uuid, first->uuid);
pe__set_action_flags(then, pcmk_action_always_in_graph);
// Don't bother marking 'then' as changed just for this
}
if (pcmk_is_set(order->type, pcmk__ar_then_implies_first_graphed)
&& !pcmk_is_set(then_flags, pcmk_action_optional)) {
pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
first->uuid, then->uuid);
pe__set_action_flags(first, pcmk_action_always_in_graph);
// Don't bother marking 'first' as changed just for this
}
if (pcmk_any_flags_set(order->type, pcmk__ar_first_implies_then
|pcmk__ar_then_implies_first
|pcmk__ar_intermediate_stop)
&& (first->rsc != NULL)
&& !pcmk_is_set(first->rsc->flags, pcmk_rsc_managed)
&& pcmk_is_set(first->rsc->flags, pcmk_rsc_blocked)
&& !pcmk_is_set(first->flags, pcmk_action_runnable)
&& pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
if (pcmk_is_set(then->flags, pcmk_action_runnable)) {
pe__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after checking whether first "
"is blocked, unmanaged, unrunnable stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
return changed;
}
// Convenience macros for logging action properties
#define action_type_str(flags) \
(pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
(pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
(pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
/*!
* \internal
* \brief Update an action's flags for all orderings where it is "then"
*
* \param[in,out] then Action to update
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__update_action_for_orderings(pcmk_action_t *then,
pcmk_scheduler_t *scheduler)
{
GList *lpc = NULL;
uint32_t changed = pcmk__updated_none;
int last_flags = then->flags;
pe_rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s",
action_type_str(then->flags), then->uuid,
action_optional_str(then->flags),
action_runnable_str(then->flags), action_node_str(then));
if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
/* Initialize current known "runnable before" actions. As
* update_action_for_ordering_flags() is called for each of then's
* before actions, this number will increment as runnable 'first'
* actions are encountered.
*/
then->runnable_before = 0;
if (then->required_runnable_before == 0) {
/* @COMPAT This ordering constraint uses the deprecated
* "require-all=false" attribute. Treat it like "clone-min=1".
*/
then->required_runnable_before = 1;
}
/* The pcmk__ar_min_runnable clause of
* update_action_for_ordering_flags() (called below)
* will reset runnable if appropriate.
*/
pe__clear_action_flags(then, pcmk_action_runnable);
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
pcmk_action_t *first = other->action;
pcmk_node_t *then_node = then->node;
pcmk_node_t *first_node = first->node;
if ((first->rsc != NULL)
&& (first->rsc->variant == pcmk_rsc_variant_group)
&& pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
if (first_node != NULL) {
pe_rsc_trace(first->rsc, "Found %s for 'first' %s",
pe__node_name(first_node), first->uuid);
}
}
if ((then->rsc != NULL)
&& (then->rsc->variant == pcmk_rsc_variant_group)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
if (then_node != NULL) {
pe_rsc_trace(then->rsc, "Found %s for 'then' %s",
pe__node_name(then_node), then->uuid);
}
}
// Disable constraint if it only applies when on same node, but isn't
if (pcmk_is_set(other->type, pcmk__ar_if_on_same_node)
&& (first_node != NULL) && (then_node != NULL)
&& !pe__same_node(first_node, then_node)) {
pe_rsc_trace(then->rsc,
"Disabled ordering %s on %s then %s on %s: "
"not same node",
other->action->uuid, pe__node_name(first_node),
then->uuid, pe__node_name(then_node));
other->type = (enum pe_ordering) pcmk__ar_none;
continue;
}
pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
if ((first->rsc != NULL)
&& pcmk_is_set(other->type, pcmk__ar_then_cancels_first)
&& !pcmk_is_set(then->flags, pcmk_action_optional)) {
/* 'then' is required, so we must abandon 'first'
* (e.g. a required stop cancels any agent reload).
*/
pe__set_action_flags(other->action, pcmk_action_optional);
if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
pe__clear_resource_flags(first->rsc, pcmk_rsc_reload);
}
}
if ((first->rsc != NULL) && (then->rsc != NULL)
&& (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) {
first = action_for_ordering(first);
}
if (first != other->action) {
pe_rsc_trace(then->rsc, "Ordering %s after %s instead of %s",
then->uuid, first->uuid, other->action->uuid);
}
pe_rsc_trace(then->rsc,
"%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s",
first->uuid, first->flags, then->uuid, then->flags,
other->type, action_node_str(first));
if (first == other->action) {
/* 'first' was not remapped (e.g. from 'start' to 'running'), which
* could mean it is a non-resource action, a primitive resource
* action, or already expanded.
*/
uint32_t first_flags, then_flags;
first_flags = action_flags_for_ordering(first, then_node);
then_flags = action_flags_for_ordering(then, first_node);
changed |= update_action_for_ordering_flags(first, then,
first_flags, then_flags,
other, scheduler);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
*/
} else if (order_actions(first, then, other->type)) {
/* This was the first time 'first' and 'then' were associated,
* start again to get the new actions_before list
*/
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
pe_rsc_trace(then->rsc,
"Disabled ordering %s then %s in favor of %s then %s",
other->action->uuid, then->uuid, first->uuid,
then->uuid);
other->type = (enum pe_ordering) pcmk__ar_none;
}
if (pcmk_is_set(changed, pcmk__updated_first)) {
crm_trace("Re-processing %s and its 'after' actions "
"because it changed", first->uuid);
for (GList *lpc2 = first->actions_after; lpc2 != NULL;
lpc2 = lpc2->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
pcmk__update_action_for_orderings(first, scheduler);
}
}
if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
if (last_flags == then->flags) {
pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
} else {
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
}
}
if (pcmk_is_set(changed, pcmk__updated_then)) {
crm_trace("Re-processing %s and its 'after' actions because it changed",
then->uuid);
if (pcmk_is_set(last_flags, pcmk_action_runnable)
&& !pcmk_is_set(then->flags, pcmk_action_runnable)) {
pcmk__block_colocation_dependents(then);
}
pcmk__update_action_for_orderings(then, scheduler);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
}
}
static inline bool
is_primitive_action(const pcmk_action_t *action)
{
return (action != NULL) && (action->rsc != NULL)
&& (action->rsc->variant == pcmk_rsc_variant_primitive);
}
/*!
* \internal
* \brief Clear a single action flag and set reason text
*
* \param[in,out] action Action whose flag should be cleared
* \param[in] flag Action flag that should be cleared
* \param[in] reason Action that is the reason why flag is being cleared
*/
#define clear_action_flag_because(action, flag, reason) do { \
if (pcmk_is_set((action)->flags, (flag))) { \
pe__clear_action_flags(action, flag); \
if ((action)->rsc != (reason)->rsc) { \
char *reason_text = pe__action2reason((reason), (flag)); \
pe_action_set_reason((action), reason_text, false); \
free(reason_text); \
} \
} \
} while (0)
/*!
* \internal
* \brief Update actions in an asymmetric ordering
*
* If the "first" action in an asymmetric ordering is unrunnable, make the
* "second" action unrunnable as well, if appropriate.
*
* \param[in] first 'First' action in an asymmetric ordering
* \param[in,out] then 'Then' action in an asymmetric ordering
*/
static void
handle_asymmetric_ordering(const pcmk_action_t *first, pcmk_action_t *then)
{
/* Only resource actions after an unrunnable 'first' action need updates for
* asymmetric ordering.
*/
if ((then->rsc == NULL)
|| pcmk_is_set(first->flags, pcmk_action_runnable)) {
return;
}
// Certain optional 'then' actions are unaffected by unrunnable 'first'
if (pcmk_is_set(then->flags, pcmk_action_optional)) {
enum rsc_role_e then_rsc_role = then->rsc->fns->state(then->rsc, TRUE);
if ((then_rsc_role == pcmk_role_stopped)
&& pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* If 'then' should stop after 'first' but is already stopped, the
* ordering is irrelevant.
*/
return;
} else if ((then_rsc_role >= pcmk_role_started)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
&& pe__rsc_running_on_only(then->rsc, then->node)) {
/* Similarly if 'then' should start after 'first' but is already
* started on a single node.
*/
return;
}
}
// 'First' can't run, so 'then' can't either
clear_action_flag_because(then, pcmk_action_optional, first);
clear_action_flag_because(then, pcmk_action_runnable, first);
}
/*!
* \internal
* \brief Set action bits appropriately when pe_restart_order is used
*
* \param[in,out] first 'First' action in an ordering with pe_restart_order
* \param[in,out] then 'Then' action in an ordering with pe_restart_order
* \param[in] filter What action flags to care about
*
* \note pe_restart_order is set for "stop resource before starting it" and
* "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pcmk_action_t *first, pcmk_action_t *then,
uint32_t filter)
{
const char *reason = NULL;
CRM_ASSERT(is_primitive_action(first));
CRM_ASSERT(is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(then->flags, pcmk_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable action on same resource (if a resource
* should restart but can't start, we still want to stop)
*/
if (pcmk_is_set(filter, pcmk_action_runnable)
&& !pcmk_is_set(then->flags, pcmk_action_runnable)
&& pcmk_is_set(then->rsc->flags, pcmk_rsc_managed)
&& (first->rsc == then->rsc)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (pcmk_is_set(first->flags, pcmk_action_runnable)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' required if 'then' is required
if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (!pcmk_is_set(then->flags, pcmk_action_migratable)) {
clear_action_flag_because(first, pcmk_action_migratable, then);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (!pcmk_is_set(first->flags, pcmk_action_optional)
&& !pcmk_is_set(first->flags, pcmk_action_runnable)) {
clear_action_flag_because(then, pcmk_action_runnable, first);
}
}
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions' flags
* (and runnable_before members if appropriate) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (ignored)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk_action_optional to affect only
* mandatory actions, and pcmk_action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
uint32_t then_flags = 0U;
uint32_t first_flags = 0U;
CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
then_flags = then->flags;
first_flags = first->flags;
if (pcmk_is_set(type, pcmk__ar_asymmetric)) {
handle_asymmetric_ordering(first, then);
}
if (pcmk_is_set(type, pcmk__ar_then_implies_first)
&& !pcmk_is_set(then_flags, pcmk_action_optional)) {
// Then is required, and implies first should be, too
if (pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(flags, pcmk_action_optional)
&& pcmk_is_set(first_flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
if (pcmk_is_set(flags, pcmk_action_migratable)
&& !pcmk_is_set(then->flags, pcmk_action_migratable)) {
clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_promoted_then_implies_first)
&& (then->rsc != NULL) && (then->rsc->role == pcmk_role_promoted)
&& pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(then->flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
if (pcmk_is_set(first->flags, pcmk_action_migratable)
&& !pcmk_is_set(then->flags, pcmk_action_migratable)) {
clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_unmigratable_then_blocks)
&& pcmk_is_set(filter, pcmk_action_optional)) {
if (!pcmk_all_flags_set(then->flags, pcmk_action_migratable
|pcmk_action_runnable)) {
clear_action_flag_because(first, pcmk_action_runnable, then);
}
if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
clear_action_flag_because(first, pcmk_action_optional, then);
}
}
if (pcmk_is_set(type, pcmk__ar_first_else_then)
&& pcmk_is_set(filter, pcmk_action_optional)
&& !pcmk_is_set(first->flags, pcmk_action_runnable)) {
clear_action_flag_because(then, pcmk_action_migratable, first);
pe__clear_action_flags(then, pcmk_action_pseudo);
}
if (pcmk_is_set(type, pcmk__ar_unrunnable_first_blocks)
&& pcmk_is_set(filter, pcmk_action_runnable)
&& pcmk_is_set(then->flags, pcmk_action_runnable)
&& !pcmk_is_set(flags, pcmk_action_runnable)) {
clear_action_flag_because(then, pcmk_action_runnable, first);
clear_action_flag_because(then, pcmk_action_migratable, first);
}
if (pcmk_is_set(type, pcmk__ar_first_implies_then)
&& pcmk_is_set(filter, pcmk_action_optional)
&& pcmk_is_set(then->flags, pcmk_action_optional)
&& !pcmk_is_set(flags, pcmk_action_optional)
&& !pcmk_is_set(first->flags, pcmk_action_migratable)) {
clear_action_flag_because(then, pcmk_action_optional, first);
}
if (pcmk_is_set(type, pcmk__ar_intermediate_stop)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
pe_rsc_trace(then->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'first' %s (%#.6x)",
then->uuid, pe__node_name(then->node),
then->flags, then_flags, first->uuid, first->flags);
if ((then->rsc != NULL) && (then->rsc->parent != NULL)) {
// Required to handle "X_stop then X_start" for cloned groups
pcmk__update_action_for_orderings(then, scheduler);
}
}
if (first_flags != first->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
pe_rsc_trace(first->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'then' %s (%#.6x)",
first->uuid, pe__node_name(first->node),
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
/*!
* \internal
* \brief Trace-log an action (optionally with its dependent actions)
*
* \param[in] pre_text If not NULL, prefix the log with this plus ": "
* \param[in] action Action to log
* \param[in] details If true, recursively log dependent actions
*/
void
pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
bool details)
{
const char *node_uname = NULL;
const char *node_uuid = NULL;
const char *desc = NULL;
CRM_CHECK(action != NULL, return);
if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) {
if (action->node != NULL) {
node_uname = action->node->details->uname;
node_uuid = action->node->details->id;
} else {
node_uname = "<none>";
}
}
switch (text2task(action->task)) {
case pcmk_action_fence:
case pcmk_action_shutdown:
if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
} else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
} else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
default:
if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
} else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
} else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(action->rsc? action->rsc->id : "<none>"),
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
}
if (details) {
const GList *iter = NULL;
const pe_action_wrapper_t *other = NULL;
crm_trace("\t\t====== Preceding Actions");
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
other = (const pe_action_wrapper_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== Subsequent Actions");
for (iter = action->actions_after; iter != NULL; iter = iter->next) {
other = (const pe_action_wrapper_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== End");
} else {
crm_trace("\t\t(before=%d, after=%d)",
g_list_length(action->actions_before),
g_list_length(action->actions_after));
}
}
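/* Illustrative usage sketch, assuming a fully populated scheduler: dump every
 * scheduled action along with its inputs and outputs at trace level:
 *
 *     for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
 *         pcmk__log_action("verify", (pcmk_action_t *) iter->data, true);
 *     }
 */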
/*!
* \internal
* \brief Create a new shutdown action for a node
*
* \param[in,out] node Node being shut down
*
* \return Newly created shutdown action for \p node
*/
pcmk_action_t *
pcmk__new_shutdown_action(pcmk_node_t *node)
{
char *shutdown_id = NULL;
pcmk_action_t *shutdown_op = NULL;
CRM_ASSERT(node != NULL);
shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
node->details->uname);
shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
node, FALSE, TRUE, node->details->data_set);
pcmk__order_stops_before_shutdown(node, shutdown_op);
add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
return shutdown_op;
}
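/* Illustrative usage sketch, assuming `node` is a cluster node that has been
 * scheduled to shut down:
 *
 *     pcmk_action_t *shutdown_op = pcmk__new_shutdown_action(node);
 *     // stops on the node are now ordered before this shutdown pseudo-action
 */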
/*!
* \internal
* \brief Calculate and add an operation digest to XML
*
 * Calculate an operation digest, which enables us to determine later whether a
 * restart is needed due to a change in the resource's parameters, and add it
 * to the given XML.
*
* \param[in] op Operation result from executor
* \param[in,out] update XML to add digest to
*/
static void
add_op_digest_to_xml(const lrmd_event_data_t *op, xmlNode *update)
{
char *digest = NULL;
xmlNode *args_xml = NULL;
if (op->params == NULL) {
return;
}
args_xml = create_xml_node(NULL, XML_TAG_PARAMS);
g_hash_table_foreach(op->params, hash2field, args_xml);
pcmk__filter_op_for_digest(args_xml);
digest = calculate_operation_digest(args_xml, NULL);
crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest);
free_xml(args_xml);
free(digest);
}
#define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
/*!
* \internal
* \brief Create XML for resource operation history update
*
* \param[in,out] parent Parent XML node to add to
* \param[in,out] op Operation event data
* \param[in] caller_version DC feature set
* \param[in] target_rc Expected result of operation
* \param[in] node Name of node on which operation was performed
* \param[in] origin Arbitrary description of update source
*
* \return Newly created XML node for history update
*/
xmlNode *
pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
const char *caller_version, int target_rc,
const char *node, const char *origin)
{
char *key = NULL;
char *magic = NULL;
char *op_id = NULL;
char *op_id_additional = NULL;
char *local_user_data = NULL;
const char *exit_reason = NULL;
xmlNode *xml_op = NULL;
const char *task = NULL;
CRM_CHECK(op != NULL, return NULL);
crm_trace("Creating history XML for %s-interval %s action for %s on %s "
"(DC version: %s, origin: %s)",
pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
((node == NULL)? "no node" : node), caller_version, origin);
task = op->op_type;
/* Record a successful agent reload as a start, and a failed one as a
* monitor, to make life easier for the scheduler when determining the
* current state.
*
* @COMPAT We should check "reload" here only if the operation was for a
* pre-OCF-1.1 resource agent, but we don't know that here, and we should
* only ever get results for actions scheduled by us, so we can reasonably
* assume any "reload" is actually a pre-1.1 agent reload.
*/
if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
NULL)) {
if (op->op_status == PCMK_EXEC_DONE) {
task = PCMK_ACTION_START;
} else {
task = PCMK_ACTION_MONITOR;
}
}
key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = crm_meta_value(op->params, "notify_type");
const char *n_task = crm_meta_value(op->params, "notify_operation");
CRM_LOG_ASSERT(n_type != NULL);
CRM_LOG_ASSERT(n_task != NULL);
op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
if (op->op_status != PCMK_EXEC_PENDING) {
/* Ignore notify errors.
*
* @TODO It might be better to keep the correct result here, and
* ignore it in process_graph_event().
*/
lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
}
    /* Migration actions keep their own history entries (rather than being
     * collapsed into a "last" entry), because they typically involve two nodes
     * and are needed by the scheduler in future transitions.
     */
} else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
op_id = strdup(key);
} else if (did_rsc_op_fail(op, target_rc)) {
op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
if (op->interval_ms == 0) {
// Ensure 'last' gets updated, in case record-pending is true
op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
}
exit_reason = op->exit_reason;
} else if (op->interval_ms > 0) {
op_id = strdup(key);
} else {
op_id = pcmk__op_key(op->rsc_id, "last", 0);
}
again:
xml_op = pcmk__xe_match(parent, XML_LRM_TAG_RSC_OP, XML_ATTR_ID, op_id);
if (xml_op == NULL) {
xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP);
}
if (op->user_data == NULL) {
crm_debug("Generating fake transition key for: " PCMK__OP_FMT
" %d from %s", op->rsc_id, op->op_type, op->interval_ms,
op->call_id, origin);
local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
FAKE_TE_ID);
op->user_data = local_user_data;
}
if (magic == NULL) {
magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc,
(const char *) op->user_data);
}
crm_xml_add(xml_op, XML_ATTR_ID, op_id);
crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key);
crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task);
crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin);
crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic);
crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, pcmk__s(exit_reason, ""));
crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); // For context during triage
crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id);
crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc);
crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status);
crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms);
if (compare_version("2.1", caller_version) <= 0) {
if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) {
crm_trace("Timing data (" PCMK__OP_FMT
"): last=%u change=%u exec=%u queue=%u",
op->rsc_id, op->op_type, op->interval_ms,
op->t_run, op->t_rcchange, op->exec_time, op->queue_time);
if ((op->interval_ms != 0) && (op->t_rcchange != 0)) {
// Recurring ops may have changed rc after initial run
crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
(long long) op->t_rcchange);
} else {
crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
(long long) op->t_run);
}
crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time);
crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time);
}
}
if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /*
         * Always record migrate_source and migrate_target for migrate ops.
         */
const char *name = XML_LRM_ATTR_MIGRATE_SOURCE;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
name = XML_LRM_ATTR_MIGRATE_TARGET;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
}
add_op_digest_to_xml(op, xml_op);
if (op_id_additional) {
free(op_id);
op_id = op_id_additional;
op_id_additional = NULL;
goto again;
}
if (local_user_data) {
free(local_user_data);
op->user_data = NULL;
}
free(magic);
free(op_id);
free(key);
return xml_op;
}
/*!
* \internal
* \brief Check whether an action shutdown-locks a resource to a node
*
* If the shutdown-lock cluster property is set, resources will not be recovered
* on a different node if cleanly stopped, and may start only on that same node.
* This function checks whether that applies to a given action, so that the
* transition graph can be marked appropriately.
*
* \param[in] action Action to check
*
* \return true if \p action locks its resource to the action's node,
* otherwise false
*/
bool
pcmk__action_locks_rsc_to_node(const pcmk_action_t *action)
{
    // Only resource actions taking place on the resource's lock node are locked
if ((action == NULL) || (action->rsc == NULL)
|| !pe__same_node(action->node, action->rsc->lock_node)) {
return false;
}
/* During shutdown, only stops are locked (otherwise, another action such as
* a demote would cause the controller to clear the lock)
*/
if (action->node->details->shutdown && (action->task != NULL)
&& (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
return false;
}
return true;
}
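/* Illustrative usage sketch: a graph builder could consult this when
 * serializing an action, so the controller knows to preserve the lock:
 *
 *     if (pcmk__action_locks_rsc_to_node(action)) {
 *         // add shutdown-lock information to the graph entry (details assumed)
 *     }
 */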
/* lowest to highest */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a;
const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (action_wrapper1->action->id < action_wrapper2->action->id) {
return 1;
}
if (action_wrapper1->action->id > action_wrapper2->action->id) {
return -1;
}
return 0;
}
/*!
* \internal
* \brief Remove any duplicate action inputs, merging action flags
*
* \param[in,out] action Action whose inputs should be checked
*/
void
pcmk__deduplicate_action_inputs(pcmk_action_t *action)
{
GList *item = NULL;
GList *next = NULL;
pe_action_wrapper_t *last_input = NULL;
action->actions_before = g_list_sort(action->actions_before,
sort_action_id);
for (item = action->actions_before; item != NULL; item = next) {
pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data;
next = item->next;
if ((last_input != NULL)
&& (input->action->id == last_input->action->id)) {
crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
input->action->uuid, input->action->id,
action->uuid, action->id);
/* For the purposes of scheduling, the ordering flags no longer
* matter, but crm_simulate looks at certain ones when creating a
* dot graph. Combining the flags is sufficient for that purpose.
*/
last_input->type |= input->type;
if (input->state == pe_link_dumped) {
last_input->state = pe_link_dumped;
}
free(item->data);
action->actions_before = g_list_delete_link(action->actions_before,
item);
} else {
last_input = input;
input->state = pe_link_not_dumped;
}
}
}
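/* Illustrative usage sketch, assuming a scheduler whose orderings have all
 * been applied: collapse duplicate inputs before dumping the graph:
 *
 *     for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
 *         pcmk__deduplicate_action_inputs((pcmk_action_t *) iter->data);
 *     }
 */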
/*!
* \internal
* \brief Output all scheduled actions
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__output_actions(pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv;
// Output node (non-resource) actions
for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
char *node_name = NULL;
char *task = NULL;
pcmk_action_t *action = (pcmk_action_t *) iter->data;
if (action->rsc != NULL) {
continue; // Resource actions will be output later
} else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
continue; // This action was not scheduled
}
if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
pcmk__str_none)) {
task = strdup("Shutdown");
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
pcmk__str_none)) {
const char *op = g_hash_table_lookup(action->meta,
"stonith_action");
task = crm_strdup_printf("Fence (%s)", op);
} else {
continue; // Don't display other node action types
}
if (pe__is_guest_node(action->node)) {
const pcmk_resource_t *remote = action->node->details->remote_rsc;
node_name = crm_strdup_printf("%s (resource: %s)",
pe__node_name(action->node),
remote->container->id);
} else if (action->node != NULL) {
node_name = crm_strdup_printf("%s", pe__node_name(action->node));
}
out->message(out, "node-action", task, node_name, action->reason);
free(node_name);
free(task);
}
// Output resource actions
for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->output_actions(rsc);
}
}
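/* Illustrative usage sketch, assuming scheduler->priv holds an output object:
 *
 *     pcmk__output_actions(scheduler);
 */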
/*!
* \internal
* \brief Check whether action from resource history is still in configuration
*
* \param[in] rsc Resource that action is for
* \param[in] task Action's name
* \param[in] interval_ms Action's interval (in milliseconds)
*
- * \return true if action is still in resource configuration, otherwise false
+ * \return true if action is still in resource configuration and enabled,
+ * otherwise false
*/
static bool
action_in_config(const pcmk_resource_t *rsc, const char *task,
guint interval_ms)
{
char *key = pcmk__op_key(rsc->id, task, interval_ms);
- bool config = (find_rsc_op_entry(rsc, key) != NULL);
+ bool config = (pcmk__find_action_config(rsc, key, false) != NULL);
free(key);
return config;
}
/*!
* \internal
* \brief Get action name needed to compare digest for configuration changes
*
* \param[in] task Action name from history
* \param[in] interval_ms Action interval (in milliseconds)
*
* \return Action name whose digest should be compared
*/
static const char *
task_for_digest(const char *task, guint interval_ms)
{
/* Certain actions need to be compared against the parameters used to start
* the resource.
*/
if ((interval_ms == 0)
&& pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
PCMK_ACTION_PROMOTE, NULL)) {
task = PCMK_ACTION_START;
}
return task;
}
/*!
* \internal
* \brief Check whether only sanitized parameters to an action changed
*
 * When collecting CIB files for troubleshooting, crm_report will mask
 * sensitive resource parameters. If simulations were run against such a masked
 * CIB, affected resources would appear to need a restart, which would
 * complicate troubleshooting. To avoid that, we save a "secure digest" of
 * non-sensitive parameters. This function uses that digest to check whether
 * only masked parameters are different.
*
* \param[in] xml_op Resource history entry with secure digest
* \param[in] digest_data Operation digest information being compared
* \param[in] scheduler Scheduler data
*
* \return true if only sanitized parameters changed, otherwise false
*/
static bool
only_sanitized_changed(const xmlNode *xml_op,
const op_digest_cache_t *digest_data,
const pcmk_scheduler_t *scheduler)
{
const char *digest_secure = NULL;
if (!pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)) {
// The scheduler is not being run as a simulation
return false;
}
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
return (digest_data->rc != pcmk__digest_match) && (digest_secure != NULL)
&& (digest_data->digest_secure_calc != NULL)
&& (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
}
/*!
* \internal
* \brief Force a restart due to a configuration change
*
* \param[in,out] rsc Resource that action is for
* \param[in] task Name of action whose configuration changed
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in,out] node Node where resource should be restarted
*/
static void
force_restart(pcmk_resource_t *rsc, const char *task, guint interval_ms,
pcmk_node_t *node)
{
char *key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk_action_t *required = custom_action(rsc, key, task, NULL, FALSE, TRUE,
rsc->cluster);
pe_action_set_reason(required, "resource definition change", true);
trigger_unfencing(rsc, node, "Device parameters changed", NULL,
rsc->cluster);
}
/*!
* \internal
* \brief Schedule a reload of a resource on a node
*
* \param[in,out] data Resource to reload
* \param[in] user_data Where resource should be reloaded
*/
static void
schedule_reload(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
pcmk_action_t *reload = NULL;
// For collective resources, just call recursively for children
if (rsc->variant > pcmk_rsc_variant_primitive) {
g_list_foreach(rsc->children, schedule_reload, user_data);
return;
}
// Skip the reload in certain situations
if ((node == NULL)
|| !pcmk_is_set(rsc->flags, pcmk_rsc_managed)
|| pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_trace(rsc, "Skip reload of %s:%s%s %s",
rsc->id,
pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " unmanaged",
pcmk_is_set(rsc->flags, pcmk_rsc_failed)? " failed" : "",
(node == NULL)? "inactive" : node->details->uname);
return;
}
/* If a resource's configuration changed while a start was pending,
* force a full restart instead of a reload.
*/
if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
rsc->id);
custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE, TRUE,
rsc->cluster);
return;
}
// Schedule the reload
pe__set_resource_flags(rsc, pcmk_rsc_reload);
reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
FALSE, TRUE, rsc->cluster);
pe_action_set_reason(reload, "resource definition change", FALSE);
// Set orderings so that a required stop or demote cancels the reload
pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->cluster);
pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->cluster);
}
/*!
* \internal
* \brief Handle any configuration change for an action
*
* Given an action from resource history, if the resource's configuration
* changed since the action was done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, etc.).
*
* \param[in,out] rsc Resource that action is for
* \param[in,out] node Node that action was on
* \param[in] xml_op Action XML from resource history
*
* \return true if action configuration changed, otherwise false
*/
bool
pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op)
{
guint interval_ms = 0;
const char *task = NULL;
const op_digest_cache_t *digest_data = NULL;
CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL),
return false);
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
CRM_CHECK(task != NULL, return false);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
// If this is a recurring action, check whether it has been orphaned
if (interval_ms > 0) {
if (action_in_config(rsc, task, interval_ms)) {
pe_rsc_trace(rsc, "%s-interval %s for %s on %s is in configuration",
pcmk__readable_interval(interval_ms), task, rsc->id,
pe__node_name(node));
} else if (pcmk_is_set(rsc->cluster->flags,
pcmk_sched_cancel_removed_actions)) {
pcmk__schedule_cancel(rsc,
crm_element_value(xml_op,
XML_LRM_ATTR_CALLID),
task, interval_ms, node, "orphan");
return true;
} else {
pe_rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned",
pcmk__readable_interval(interval_ms), task, rsc->id,
pe__node_name(node));
return true;
}
}
crm_trace("Checking %s-interval %s for %s on %s for configuration changes",
pcmk__readable_interval(interval_ms), task, rsc->id,
pe__node_name(node));
task = task_for_digest(task, interval_ms);
digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->cluster);
if (only_sanitized_changed(xml_op, digest_data, rsc->cluster)) {
if (!pcmk__is_daemon && (rsc->cluster->priv != NULL)) {
pcmk__output_t *out = rsc->cluster->priv;
out->info(out,
"Only 'private' parameters to %s-interval %s for %s "
"on %s changed: %s",
pcmk__readable_interval(interval_ms), task, rsc->id,
pe__node_name(node),
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
}
return false;
}
switch (digest_data->rc) {
case pcmk__digest_restart:
crm_log_xml_debug(digest_data->params_restart, "params:restart");
force_restart(rsc, task, interval_ms, node);
return true;
case pcmk__digest_unknown:
case pcmk__digest_mismatch:
// Changes that can potentially be handled by an agent reload
if (interval_ms > 0) {
                /* Recurring actions aren't reloaded per se; they are just
                 * rescheduled so that the next run uses the new parameters.
                 * The old instance will be cancelled automatically.
                 */
crm_log_xml_debug(digest_data->params_all, "params:reschedule");
pcmk__reschedule_recurring(rsc, task, interval_ms, node);
} else if (crm_element_value(xml_op,
XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
// Agent supports reload, so use it
trigger_unfencing(rsc, node,
"Device parameters changed (reload)", NULL,
rsc->cluster);
crm_log_xml_debug(digest_data->params_all, "params:reload");
schedule_reload((gpointer) rsc, (gpointer) node);
} else {
pe_rsc_trace(rsc,
"Restarting %s "
"because agent doesn't support reload", rsc->id);
crm_log_xml_debug(digest_data->params_restart,
"params:restart");
force_restart(rsc, task, interval_ms, node);
}
return true;
default:
break;
}
return false;
}
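/* Illustrative usage sketch, assuming `rsc`, `node`, and `rsc_op` were
 * unpacked from the CIB status section (as in process_rsc_history() below):
 *
 *     if (pcmk__check_action_config(rsc, node, rsc_op)) {
 *         // a restart, reload, or reschedule has already been scheduled
 *     }
 */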
/*!
* \internal
* \brief Create a list of resource's action history entries, sorted by call ID
*
 * \param[in] rsc_entry Resource's <lrm_resource> status XML
 * \param[out] start_index Where to store index of start-like action, if any
 * \param[out] stop_index Where to store index of stop action, if any
 *
 * \return Newly allocated list of the entry's <lrm_rsc_op> children, sorted by
 *         call ID
 */
static GList *
rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
{
GList *ops = NULL;
for (xmlNode *rsc_op = first_named_child(rsc_entry, XML_LRM_TAG_RSC_OP);
rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) {
ops = g_list_prepend(ops, rsc_op);
}
ops = g_list_sort(ops, sort_op_by_callid);
calculate_active_ops(ops, start_index, stop_index);
return ops;
}
/*!
* \internal
* \brief Process a resource's action history from the CIB status
*
* Given a resource's action history, if the resource's configuration
* changed since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
 * \param[in] rsc_entry Resource's <lrm_resource> status XML
* \param[in,out] rsc Resource whose history is being processed
* \param[in,out] node Node whose history is being processed
*/
static void
process_rsc_history(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
pcmk_node_t *node)
{
int offset = -1;
int stop_index = 0;
int start_index = 0;
GList *sorted_op_list = NULL;
if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
if (pe_rsc_is_anon_clone(pe__const_top_resource(rsc, false))) {
pe_rsc_trace(rsc,
"Skipping configuration check "
"for orphaned clone instance %s",
rsc->id);
} else {
pe_rsc_trace(rsc,
"Skipping configuration check and scheduling clean-up "
"for orphaned resource %s", rsc->id);
pcmk__schedule_cleanup(rsc, node, false);
}
return;
}
if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) {
pcmk__schedule_cleanup(rsc, node, false);
}
pe_rsc_trace(rsc,
"Skipping configuration check for %s "
"because no longer active on %s",
rsc->id, pe__node_name(node));
return;
}
pe_rsc_trace(rsc, "Checking for configuration changes for %s on %s",
rsc->id, pe__node_name(node));
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) {
pcmk__schedule_cleanup(rsc, node, false);
}
sorted_op_list = rsc_history_as_list(rsc_entry, &start_index, &stop_index);
if (start_index < stop_index) {
return; // Resource is stopped
}
for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
xmlNode *rsc_op = (xmlNode *) iter->data;
const char *task = NULL;
guint interval_ms = 0;
if (++offset < start_index) {
// Skip actions that happened before a start
continue;
}
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if ((interval_ms > 0)
&& (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)
|| node->details->maintenance)) {
// Maintenance mode cancels recurring operations
pcmk__schedule_cancel(rsc,
crm_element_value(rsc_op,
XML_LRM_ATTR_CALLID),
task, interval_ms, node, "maintenance mode");
} else if ((interval_ms > 0)
|| pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START,
PCMK_ACTION_PROMOTE,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
            /* If a resource operation failed and the operation's definition
             * has changed, clear any fail count so the operation can be
             * retried fresh.
             */
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't assigned resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
pe__add_param_check(rsc_op, rsc, node, pcmk__check_active,
rsc->cluster);
} else if (pcmk__check_action_config(rsc, node, rsc_op)
&& (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL) != 0)) {
pe__clear_failcount(rsc, node, "action definition changed",
rsc->cluster);
}
}
}
g_list_free(sorted_op_list);
}
/*!
* \internal
* \brief Process a node's action history from the CIB status
*
* Given a node's resource history, if the resource's configuration changed
* since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] node Node whose history is being processed
* \param[in] lrm_rscs Node's <lrm_resources> from CIB status XML
*/
static void
process_node_history(pcmk_node_t *node, const xmlNode *lrm_rscs)
{
crm_trace("Processing node history for %s", pe__node_name(node));
for (const xmlNode *rsc_entry = first_named_child(lrm_rscs,
XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
if (rsc_entry->children != NULL) {
GList *result = pcmk__rscs_matching_id(ID(rsc_entry),
node->details->data_set);
for (GList *iter = result; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->variant == pcmk_rsc_variant_primitive) {
process_rsc_history(rsc_entry, rsc, node);
}
}
g_list_free(result);
}
}
}
// XPath to find a node's resource history
#define XPATH_NODE_HISTORY "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \
"/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \
"/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES
/*!
* \internal
* \brief Process any resource configuration changes in the CIB status
*
* Go through all nodes' resource history, and if a resource's configuration
* changed since its actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
{
crm_trace("Check resource and action configuration for changes");
/* Rather than iterate through the status section, iterate through the nodes
* and search for the appropriate status subsection for each. This skips
* orphaned nodes and lets us eliminate some cases before searching the XML.
*/
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* Don't bother checking actions for a node that can't run actions ...
* unless it's in maintenance mode, in which case we still need to
* cancel any existing recurring monitors.
*/
if (node->details->maintenance
|| pcmk__node_available(node, false, false)) {
char *xpath = NULL;
xmlNode *history = NULL;
xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname);
history = get_xpath_object(xpath, scheduler->input, LOG_NEVER);
free(xpath);
process_node_history(node, history);
}
}
}
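/* Illustrative usage sketch, assuming `scheduler` holds the unpacked input CIB:
 *
 *     pcmk__handle_rsc_config_changes(scheduler);
 */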
diff --git a/lib/pacemaker/pcmk_sched_recurring.c b/lib/pacemaker/pcmk_sched_recurring.c
index 1f47359ea0..0e4c9fce8b 100644
--- a/lib/pacemaker/pcmk_sched_recurring.c
+++ b/lib/pacemaker/pcmk_sched_recurring.c
@@ -1,724 +1,726 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <crm/msg_xml.h>
#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
// Information parsed from an operation history entry in the CIB
struct op_history {
// XML attributes
const char *id; // ID of history entry
const char *name; // Action name
// Parsed information
char *key; // Operation key for action
enum rsc_role_e role; // Action role (or pcmk_role_unknown for default)
guint interval_ms; // Action interval
};
/*!
* \internal
* \brief Parse an interval from XML
*
* \param[in] xml XML containing an interval attribute
*
* \return Interval parsed from XML (or 0 as default)
*/
static guint
xe_interval(const xmlNode *xml)
{
return crm_parse_interval_spec(crm_element_value(xml,
XML_LRM_ATTR_INTERVAL));
}
/*!
* \internal
* \brief Check whether an operation exists multiple times in resource history
*
* \param[in] rsc Resource with history to search
* \param[in] name Name of action to search for
* \param[in] interval_ms Interval (in milliseconds) of action to search for
*
* \return true if an operation with \p name and \p interval_ms exists more than
* once in the operation history of \p rsc, otherwise false
*/
static bool
is_op_dup(const pcmk_resource_t *rsc, const char *name, guint interval_ms)
{
const char *id = NULL;
for (xmlNode *op = first_named_child(rsc->ops_xml, "op");
op != NULL; op = crm_next_same_xml(op)) {
// Check whether action name and interval match
if (!pcmk__str_eq(crm_element_value(op, "name"), name, pcmk__str_none)
|| (xe_interval(op) != interval_ms)) {
continue;
}
if (ID(op) == NULL) {
continue; // Shouldn't be possible
}
if (id == NULL) {
id = ID(op); // First matching op
} else {
pcmk__config_err("Operation %s is duplicate of %s (do not use "
"same name and interval combination more "
"than once per resource)", ID(op), id);
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether an action name is one that can be recurring
*
* \param[in] name Action name to check
*
* \return true if \p name is an action known to be unsuitable as a recurring
* operation, otherwise false
*
* \note Pacemaker's current philosophy is to allow users to configure recurring
* operations except for a short list of actions known not to be suitable
* for that (as opposed to allowing only actions known to be suitable,
* which includes only monitor). Among other things, this approach allows
* users to define their own custom operations and make them recurring,
* though that use case is not well tested.
*/
static bool
op_cannot_recur(const char *name)
{
return pcmk__str_any_of(name, PCMK_ACTION_STOP, PCMK_ACTION_START,
PCMK_ACTION_DEMOTE, PCMK_ACTION_PROMOTE,
PCMK_ACTION_RELOAD_AGENT,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
NULL);
}
/*!
* \internal
* \brief Check whether a resource history entry is for a recurring action
*
* \param[in] rsc Resource that history entry is for
* \param[in] xml XML of resource history entry to check
* \param[out] op Where to store parsed info if recurring
*
* \return true if \p xml is for a recurring action, otherwise false
*/
static bool
is_recurring_history(const pcmk_resource_t *rsc, const xmlNode *xml,
struct op_history *op)
{
const char *role = NULL;
op->interval_ms = xe_interval(xml);
if (op->interval_ms == 0) {
return false; // Not recurring
}
op->id = ID(xml);
if (pcmk__str_empty(op->id)) {
pcmk__config_err("Ignoring resource history entry without ID");
return false; // Shouldn't be possible (unless CIB was manually edited)
}
op->name = crm_element_value(xml, "name");
if (op_cannot_recur(op->name)) {
pcmk__config_err("Ignoring %s because %s action cannot be recurring",
op->id, pcmk__s(op->name, "unnamed"));
return false;
}
// There should only be one recurring operation per action/interval
if (is_op_dup(rsc, op->name, op->interval_ms)) {
return false;
}
// Ensure role is valid if specified
role = crm_element_value(xml, "role");
if (role == NULL) {
op->role = pcmk_role_unknown;
} else {
op->role = text2role(role);
if (op->role == pcmk_role_unknown) {
pcmk__config_err("Ignoring %s because %s is not a valid role",
op->id, role);
}
}
- // Disabled resources don't get monitored
+ // Only actions that are still configured and enabled matter
op->key = pcmk__op_key(rsc->id, op->name, op->interval_ms);
- if (find_rsc_op_entry(rsc, op->key) == NULL) {
- crm_trace("Not creating recurring action %s for disabled resource %s",
- op->id, rsc->id);
+ if (pcmk__find_action_config(rsc, op->key, false) == NULL) {
+ pe_rsc_trace(rsc,
+ "Not counting action %s (%s) as recurring because "
+ "it is disabled or no longer in configuration",
+ op->id, op->key);
free(op->key);
return false;
}
return true;
}
/*!
* \internal
* \brief Check whether a recurring action for an active role should be optional
*
* \param[in] rsc Resource that recurring action is for
* \param[in] node Node that \p rsc will be active on (if any)
* \param[in] key Operation key for recurring action to check
* \param[in,out] start Start action for \p rsc
*
* \return true if recurring action should be optional, otherwise false
*/
static bool
active_recurring_should_be_optional(const pcmk_resource_t *rsc,
const pcmk_node_t *node, const char *key,
pcmk_action_t *start)
{
GList *possible_matches = NULL;
if (node == NULL) { // Should only be possible if unmanaged and stopped
pe_rsc_trace(rsc, "%s will be mandatory because resource is unmanaged",
key);
return false;
}
if (!pcmk_is_set(rsc->cmds->action_flags(start, NULL),
pcmk_action_optional)) {
pe_rsc_trace(rsc, "%s will be mandatory because %s is",
key, start->uuid);
return false;
}
possible_matches = find_actions_exact(rsc->actions, key, node);
if (possible_matches == NULL) {
pe_rsc_trace(rsc, "%s will be mandatory because it is not active on %s",
key, pe__node_name(node));
return false;
}
for (const GList *iter = possible_matches;
iter != NULL; iter = iter->next) {
const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
if (pcmk_is_set(op->flags, pcmk_action_reschedule)) {
pe_rsc_trace(rsc,
"%s will be mandatory because "
"it needs to be rescheduled", key);
g_list_free(possible_matches);
return false;
}
}
g_list_free(possible_matches);
return true;
}
/*!
* \internal
* \brief Create recurring action from resource history entry for an active role
*
* \param[in,out] rsc Resource that resource history is for
* \param[in,out] start Start action for \p rsc on \p node
* \param[in] node Node that resource will be active on (if any)
* \param[in] op Resource history entry
*/
static void
recurring_op_for_active(pcmk_resource_t *rsc, pcmk_action_t *start,
const pcmk_node_t *node, const struct op_history *op)
{
pcmk_action_t *mon = NULL;
bool is_optional = true;
const bool is_default_role = (op->role == pcmk_role_unknown);
// We're only interested in recurring actions for active roles
if (op->role == pcmk_role_stopped) {
return;
}
is_optional = active_recurring_should_be_optional(rsc, node, op->key,
start);
if ((!is_default_role && (rsc->next_role != op->role))
|| (is_default_role && (rsc->next_role == pcmk_role_promoted))) {
// Configured monitor role doesn't match role resource will have
if (is_optional) { // It's running, so cancel it
char *after_key = NULL;
pcmk_action_t *cancel_op = pcmk__new_cancel_action(rsc, op->name,
op->interval_ms,
node);
switch (rsc->role) {
case pcmk_role_unpromoted:
case pcmk_role_started:
if (rsc->next_role == pcmk_role_promoted) {
after_key = promote_key(rsc);
} else if (rsc->next_role == pcmk_role_stopped) {
after_key = stop_key(rsc);
}
break;
case pcmk_role_promoted:
after_key = demote_key(rsc);
break;
default:
break;
}
if (after_key) {
pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
}
}
do_crm_log((is_optional? LOG_INFO : LOG_TRACE),
"%s recurring action %s because %s configured for %s role "
"(not %s)",
(is_optional? "Cancelling" : "Ignoring"), op->key, op->id,
role2text(is_default_role? pcmk_role_unpromoted : op->role),
role2text(rsc->next_role));
return;
}
pe_rsc_trace(rsc,
"Creating %s recurring action %s for %s (%s %s on %s)",
(is_optional? "optional" : "mandatory"), op->key,
op->id, rsc->id, role2text(rsc->next_role),
pe__node_name(node));
mon = custom_action(rsc, strdup(op->key), op->name, node, is_optional, TRUE,
rsc->cluster);
if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
pe_rsc_trace(rsc, "%s is unrunnable because start is", mon->uuid);
pe__clear_action_flags(mon, pcmk_action_runnable);
} else if ((node == NULL) || !node->details->online
|| node->details->unclean) {
pe_rsc_trace(rsc, "%s is unrunnable because no node is available",
mon->uuid);
pe__clear_action_flags(mon, pcmk_action_runnable);
} else if (!pcmk_is_set(mon->flags, pcmk_action_optional)) {
pe_rsc_info(rsc, "Start %s-interval %s for %s on %s",
pcmk__readable_interval(op->interval_ms), mon->task,
rsc->id, pe__node_name(node));
}
if (rsc->next_role == pcmk_role_promoted) {
pe__add_action_expected_result(mon, CRM_EX_PROMOTED);
}
// Order monitor relative to other actions
if ((node == NULL) || pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__new_ordering(rsc, start_key(rsc), NULL,
NULL, strdup(mon->uuid), mon,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
pcmk__new_ordering(rsc, reload_key(rsc), NULL,
NULL, strdup(mon->uuid), mon,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
if (rsc->next_role == pcmk_role_promoted) {
pcmk__new_ordering(rsc, promote_key(rsc), NULL,
rsc, NULL, mon,
pcmk__ar_ordered
|pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
} else if (rsc->role == pcmk_role_promoted) {
pcmk__new_ordering(rsc, demote_key(rsc), NULL,
rsc, NULL, mon,
pcmk__ar_ordered
|pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
}
}
}
/*!
* \internal
* \brief Cancel a recurring action if running on a node
*
* \param[in,out] rsc Resource that action is for
* \param[in] node Node to cancel action on
* \param[in] key Operation key for action
* \param[in] name Action name
* \param[in] interval_ms Action interval (in milliseconds)
*/
static void
cancel_if_running(pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *key, const char *name, guint interval_ms)
{
GList *possible_matches = find_actions_exact(rsc->actions, key, node);
pcmk_action_t *cancel_op = NULL;
if (possible_matches == NULL) {
return; // Recurring action isn't running on this node
}
g_list_free(possible_matches);
cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
switch (rsc->next_role) {
case pcmk_role_started:
case pcmk_role_unpromoted:
/* Order starts after cancel. If the current role is
* stopped, this cancels the monitor before the resource
* starts; if the current role is started, then this cancels
* the monitor on a migration target before starting there.
*/
pcmk__new_ordering(rsc, NULL, cancel_op,
rsc, start_key(rsc), NULL,
pcmk__ar_unrunnable_first_blocks, rsc->cluster);
break;
default:
break;
}
pe_rsc_info(rsc,
"Cancelling %s-interval %s action for %s on %s because "
"configured for " PCMK__ROLE_STOPPED " role (not %s)",
pcmk__readable_interval(interval_ms), name, rsc->id,
pe__node_name(node), role2text(rsc->next_role));
}
/*!
* \internal
* \brief Order an action after all probes of a resource on a node
*
* \param[in,out] rsc Resource to check for probes
* \param[in] node Node to check for probes of \p rsc
* \param[in,out] action Action to order after probes of \p rsc on \p node
*/
static void
order_after_probes(pcmk_resource_t *rsc, const pcmk_node_t *node,
pcmk_action_t *action)
{
GList *probes = pe__resource_actions(rsc, node, PCMK_ACTION_MONITOR, FALSE);
for (GList *iter = probes; iter != NULL; iter = iter->next) {
order_actions((pcmk_action_t *) iter->data, action,
pcmk__ar_unrunnable_first_blocks);
}
g_list_free(probes);
}
/*!
* \internal
* \brief Order an action after all stops of a resource on a node
*
* \param[in,out] rsc Resource to check for stops
* \param[in] node Node to check for stops of \p rsc
* \param[in,out] action Action to order after stops of \p rsc on \p node
*/
static void
order_after_stops(pcmk_resource_t *rsc, const pcmk_node_t *node,
pcmk_action_t *action)
{
GList *stop_ops = pe__resource_actions(rsc, node, PCMK_ACTION_STOP, TRUE);
for (GList *iter = stop_ops; iter != NULL; iter = iter->next) {
pcmk_action_t *stop = (pcmk_action_t *) iter->data;
if (!pcmk_is_set(stop->flags, pcmk_action_optional)
&& !pcmk_is_set(action->flags, pcmk_action_optional)
&& !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "%s optional on %s: unmanaged",
action->uuid, pe__node_name(node));
pe__set_action_flags(action, pcmk_action_optional);
}
if (!pcmk_is_set(stop->flags, pcmk_action_runnable)) {
crm_debug("%s unrunnable on %s: stop is unrunnable",
action->uuid, pe__node_name(node));
pe__clear_action_flags(action, pcmk_action_runnable);
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__new_ordering(rsc, stop_key(rsc), stop,
NULL, NULL, action,
pcmk__ar_first_implies_then
|pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
}
}
g_list_free(stop_ops);
}
/*!
* \internal
* \brief Create recurring action from resource history entry for inactive role
*
* \param[in,out] rsc Resource that resource history is for
* \param[in] node Node that resource will be active on (if any)
* \param[in] op Resource history entry
*/
static void
recurring_op_for_inactive(pcmk_resource_t *rsc, const pcmk_node_t *node,
const struct op_history *op)
{
GList *possible_matches = NULL;
// We're only interested in recurring actions for the inactive role
if (op->role != pcmk_role_stopped) {
return;
}
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
crm_notice("Ignoring %s (recurring monitors for " PCMK__ROLE_STOPPED
" role are not supported for anonymous clones)", op->id);
return; // @TODO add support
}
pe_rsc_trace(rsc, "Creating recurring action %s for %s on nodes "
"where it should not be running", op->id, rsc->id);
for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *stop_node = (pcmk_node_t *) iter->data;
bool is_optional = true;
pcmk_action_t *stopped_mon = NULL;
// Cancel action on node where resource will be active
if ((node != NULL)
&& pcmk__str_eq(stop_node->details->uname, node->details->uname,
pcmk__str_casei)) {
cancel_if_running(rsc, node, op->key, op->name, op->interval_ms);
continue;
}
// Recurring action on this node is optional if it's already active here
possible_matches = find_actions_exact(rsc->actions, op->key, stop_node);
is_optional = (possible_matches != NULL);
g_list_free(possible_matches);
pe_rsc_trace(rsc,
"Creating %s recurring action %s for %s (%s "
PCMK__ROLE_STOPPED " on %s)",
(is_optional? "optional" : "mandatory"),
op->key, op->id, rsc->id, pe__node_name(stop_node));
stopped_mon = custom_action(rsc, strdup(op->key), op->name, stop_node,
is_optional, TRUE, rsc->cluster);
pe__add_action_expected_result(stopped_mon, CRM_EX_NOT_RUNNING);
if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
order_after_probes(rsc, stop_node, stopped_mon);
}
/* The recurring action is for the inactive role, so it shouldn't be
* performed until the resource is inactive.
*/
order_after_stops(rsc, stop_node, stopped_mon);
if (!stop_node->details->online || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s unrunnable on %s: node unavailable)",
stopped_mon->uuid, pe__node_name(stop_node));
pe__clear_action_flags(stopped_mon, pcmk_action_runnable);
}
if (pcmk_is_set(stopped_mon->flags, pcmk_action_runnable)
&& !pcmk_is_set(stopped_mon->flags, pcmk_action_optional)) {
crm_notice("Start recurring %s-interval %s for "
PCMK__ROLE_STOPPED " %s on %s",
pcmk__readable_interval(op->interval_ms),
stopped_mon->task, rsc->id, pe__node_name(stop_node));
}
}
}
/*!
* \internal
* \brief Create recurring actions for a resource
*
* \param[in,out] rsc Resource to create recurring actions for
*/
void
pcmk__create_recurring_actions(pcmk_resource_t *rsc)
{
pcmk_action_t *start = NULL;
if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
pe_rsc_trace(rsc, "Skipping recurring actions for blocked resource %s",
rsc->id);
return;
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pe_rsc_trace(rsc, "Skipping recurring actions for %s "
"in maintenance mode", rsc->id);
return;
}
if (rsc->allocated_to == NULL) {
// Recurring actions for active roles not needed
} else if (rsc->allocated_to->details->maintenance) {
pe_rsc_trace(rsc,
"Skipping recurring actions for %s on %s "
"in maintenance mode",
rsc->id, pe__node_name(rsc->allocated_to));
} else if ((rsc->next_role != pcmk_role_stopped)
|| !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
// Recurring actions for active roles needed
start = start_action(rsc, rsc->allocated_to, TRUE);
}
pe_rsc_trace(rsc, "Creating any recurring actions needed for %s", rsc->id);
for (xmlNode *op = first_named_child(rsc->ops_xml, "op");
op != NULL; op = crm_next_same_xml(op)) {
struct op_history op_history = { NULL, };
if (!is_recurring_history(rsc, op, &op_history)) {
continue;
}
if (start != NULL) {
recurring_op_for_active(rsc, start, rsc->allocated_to, &op_history);
}
recurring_op_for_inactive(rsc, rsc->allocated_to, &op_history);
free(op_history.key);
}
}
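/* Illustrative usage sketch, assuming `rsc` has already been assigned to a
 * node (or deliberately left unassigned):
 *
 *     pcmk__create_recurring_actions(rsc);
 */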
/*!
* \internal
* \brief Create an executor cancel action
*
* \param[in,out] rsc Resource of action to cancel
* \param[in] task Name of action to cancel
* \param[in] interval_ms Interval of action to cancel
* \param[in] node Node of action to cancel
*
* \return Created op
*/
pcmk_action_t *
pcmk__new_cancel_action(pcmk_resource_t *rsc, const char *task,
guint interval_ms, const pcmk_node_t *node)
{
pcmk_action_t *cancel_op = NULL;
char *key = NULL;
char *interval_ms_s = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL) && (node != NULL));
// @TODO dangerous if possible to schedule another action with this key
key = pcmk__op_key(rsc->id, task, interval_ms);
cancel_op = custom_action(rsc, key, PCMK_ACTION_CANCEL, node, FALSE, TRUE,
rsc->cluster);
pcmk__str_update(&cancel_op->task, PCMK_ACTION_CANCEL);
pcmk__str_update(&cancel_op->cancel_task, task);
interval_ms_s = crm_strdup_printf("%u", interval_ms);
add_hash_param(cancel_op->meta, XML_LRM_ATTR_TASK, task);
add_hash_param(cancel_op->meta, XML_LRM_ATTR_INTERVAL_MS, interval_ms_s);
free(interval_ms_s);
return cancel_op;
}
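/* Illustrative usage sketch, assuming a 10-second monitor is active for `rsc`
 * on `node`:
 *
 *     pcmk_action_t *cancel = pcmk__new_cancel_action(rsc, PCMK_ACTION_MONITOR,
 *                                                     10000, node);
 */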
/*!
* \internal
* \brief Schedule cancellation of a recurring action
*
* \param[in,out] rsc Resource that action is for
* \param[in] call_id Action's call ID from history
* \param[in] task Action name
* \param[in] interval_ms Action interval
* \param[in] node Node that history entry is for
* \param[in] reason Short description of why action is cancelled
*/
void
pcmk__schedule_cancel(pcmk_resource_t *rsc, const char *call_id,
const char *task, guint interval_ms,
const pcmk_node_t *node, const char *reason)
{
pcmk_action_t *cancel = NULL;
CRM_CHECK((rsc != NULL) && (task != NULL)
&& (node != NULL) && (reason != NULL),
return);
crm_info("Recurring %s-interval %s for %s will be stopped on %s: %s",
pcmk__readable_interval(interval_ms), task, rsc->id,
pe__node_name(node), reason);
cancel = pcmk__new_cancel_action(rsc, task, interval_ms, node);
add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
// Cancellations happen after stops
pcmk__new_ordering(rsc, stop_key(rsc), NULL, rsc, NULL, cancel,
pcmk__ar_ordered, rsc->cluster);
}
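/* Illustrative usage sketch, mirroring the callers in pcmk_sched_actions.c,
 * with the call ID taken straight from the history XML:
 *
 *     pcmk__schedule_cancel(rsc,
 *                           crm_element_value(rsc_op, XML_LRM_ATTR_CALLID),
 *                           task, interval_ms, node, "maintenance mode");
 */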
/*!
* \internal
* \brief Create a recurring action marked as needing rescheduling if active
*
* \param[in,out] rsc Resource that action is for
* \param[in] task Name of action being rescheduled
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in,out] node Node where action should be rescheduled
*/
void
pcmk__reschedule_recurring(pcmk_resource_t *rsc, const char *task,
guint interval_ms, pcmk_node_t *node)
{
pcmk_action_t *op = NULL;
trigger_unfencing(rsc, node, "Device parameters changed (reschedule)",
NULL, rsc->cluster);
op = custom_action(rsc, pcmk__op_key(rsc->id, task, interval_ms),
task, node, TRUE, TRUE, rsc->cluster);
pe__set_action_flags(op, pcmk_action_reschedule);
}
/*!
* \internal
* \brief Check whether an action is recurring
*
* \param[in] action Action to check
*
* \return true if \p action has a nonzero interval, otherwise false
*/
bool
pcmk__action_is_recurring(const pcmk_action_t *action)
{
guint interval_ms = 0;
if (pcmk__guint_from_hash(action->meta,
XML_LRM_ATTR_INTERVAL_MS, 0,
&interval_ms) != pcmk_rc_ok) {
return false;
}
return (interval_ms > 0);
}
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index 45137cdc02..c003010351 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,1848 +1,1842 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
#include <crm/common/xml_internal.h>
#include "pe_status_private.h"
static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
const pcmk_resource_t *container,
guint interval_ms);
static void
add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action)
{
if (scheduler->singletons == NULL) {
scheduler->singletons = pcmk__strkey_table(NULL, NULL);
}
g_hash_table_insert(scheduler->singletons, action->uuid, action);
}
static pcmk_action_t *
lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid)
{
if (scheduler->singletons == NULL) {
return NULL;
}
return g_hash_table_lookup(scheduler->singletons, action_uuid);
}
/*!
* \internal
* \brief Find an existing action that matches arguments
*
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
* \param[in] scheduler Scheduler data
*
* \return Existing action that matches arguments (or NULL if none)
*/
static pcmk_action_t *
find_existing_action(const char *key, const pcmk_resource_t *rsc,
const pcmk_node_t *node, const pcmk_scheduler_t *scheduler)
{
GList *matches = NULL;
pcmk_action_t *action = NULL;
/* When rsc is NULL, it would be quicker to check scheduler->singletons,
* but checking all scheduler->actions takes the node into account.
*/
matches = find_actions(((rsc == NULL)? scheduler->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
}
CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches));
action = matches->data;
g_list_free(matches);
return action;
}
/*!
* \internal
* \brief Find the XML configuration corresponding to a specific action key
*
* \param[in] rsc Resource to find action configuration for
* \param[in] key "RSC_ACTION_INTERVAL" of action to find
* \param[in] include_disabled If false, do not return disabled actions
*
* \return XML configuration of desired action if any, otherwise NULL
*/
static xmlNode *
find_exact_action_config(const pcmk_resource_t *rsc, const char *key,
bool include_disabled)
{
xmlNode *action_config = NULL;
for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
guint interval_ms = 0;
char *match_key = NULL;
const char *name = crm_element_value(operation, "name");
const char *interval_spec = crm_element_value(operation,
XML_LRM_ATTR_INTERVAL);
// @TODO This does not consider rules, defaults, etc.
if (!include_disabled
&& (pcmk__xe_get_bool_attr(operation, "enabled",
&enabled) == pcmk_rc_ok) && !enabled) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
// Try first with resource ID
match_key = pcmk__op_key(rsc->id, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_none)) {
action_config = operation;
}
free(match_key);
// Then again with clone_name in case ID has instance number
if (rsc->clone_name != NULL) {
match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_none)) {
action_config = operation;
}
free(match_key);
}
if (action_config != NULL) {
break;
}
}
return action_config;
}
/*!
* \internal
* \brief Find the XML configuration of a resource action
*
* \param[in] rsc Resource to find action configuration for
* \param[in] key "RSC_ACTION_INTERVAL" of action to find
* \param[in] include_disabled If false, do not return disabled actions
*
* \return XML configuration of desired action if any, otherwise NULL
*/
-static xmlNode *
-find_rsc_op_entry_helper(const pcmk_resource_t *rsc, const char *key,
+xmlNode *
+pcmk__find_action_config(const pcmk_resource_t *rsc, const char *key,
bool include_disabled)
{
char *retry_key = NULL;
xmlNode *action_config = NULL;
// Try exact key first
action_config = find_exact_action_config(rsc, key, include_disabled);
if (action_config != NULL) {
return action_config;
}
// For migrate_to and migrate_from actions, retry with "migrate"
// @TODO This should be either documented or deprecated
if (pcmk__ends_with(key, "_" PCMK_ACTION_MIGRATE_TO "_0")
|| pcmk__ends_with(key, "_" PCMK_ACTION_MIGRATE_FROM "_0")) {
retry_key = pcmk__op_key(rsc->id, "migrate", 0);
action_config = find_exact_action_config(rsc, retry_key,
include_disabled);
free(retry_key);
if (action_config != NULL) {
return action_config;
}
}
/* If the given key is for one of the many notification pseudo-actions
* (pre_notify_promote, etc.), the actual action name is "notify"
*/
if (strstr(key, "_notify_")) {
retry_key = pcmk__op_key(rsc->id, PCMK_ACTION_NOTIFY, 0);
action_config = find_exact_action_config(rsc, retry_key,
include_disabled);
free(retry_key);
if (action_config != NULL) {
return action_config;
}
}
return NULL;
}
-xmlNode *
-find_rsc_op_entry(const pcmk_resource_t *rsc, const char *key)
-{
- return find_rsc_op_entry_helper(rsc, key, false);
-}
-
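/* Illustrative usage sketch: callers that previously used
 * find_rsc_op_entry(rsc, key) now request only enabled actions explicitly:
 *
 *     xmlNode *config = pcmk__find_action_config(rsc, key, false);
 */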
/*!
* \internal
* \brief Create a new action object
*
* \param[in] key Action key
* \param[in] task Action name
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in,out] scheduler Scheduler data
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
static pcmk_action_t *
new_action(char *key, const char *task, pcmk_resource_t *rsc,
const pcmk_node_t *node, bool optional, bool for_graph,
pcmk_scheduler_t *scheduler)
{
pcmk_action_t *action = calloc(1, sizeof(pcmk_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
action->task = strdup(task); CRM_ASSERT(action->task != NULL);
action->uuid = key;
if (node) {
action->node = pe__copy_node(node);
}
if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
pe__set_action_flags(action, pcmk_action_on_dc);
}
pe__set_action_flags(action, pcmk_action_runnable);
if (optional) {
pe__set_action_flags(action, pcmk_action_optional);
} else {
pe__clear_action_flags(action, pcmk_action_optional);
}
if (rsc == NULL) {
action->meta = pcmk__strkey_table(free, free);
} else {
guint interval_ms = 0;
- action->op_entry = find_rsc_op_entry_helper(rsc, key, true);
+ action->op_entry = pcmk__find_action_config(rsc, key, true);
parse_op_key(key, NULL, NULL, &interval_ms);
unpack_operation(action, action->op_entry, rsc->container, interval_ms);
}
if (for_graph) {
pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
(optional? "optional" : "required"),
scheduler->action_id, key, task,
((rsc == NULL)? "no resource" : rsc->id),
pe__node_name(node));
action->id = scheduler->action_id++;
scheduler->actions = g_list_prepend(scheduler->actions, action);
if (rsc == NULL) {
add_singleton(scheduler, action);
} else {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
return action;
}
/*!
* \internal
* \brief Unpack a resource's action-specific instance parameters
*
* \param[in] action_xml XML of action's configuration in CIB (if any)
* \param[in,out] node_attrs Table of node attributes (for rule evaluation)
* \param[in,out] scheduler Cluster working set (for rule evaluation)
*
* \return Newly allocated hash table of action-specific instance parameters
*/
GHashTable *
pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
GHashTable *node_attrs,
pcmk_scheduler_t *scheduler)
{
GHashTable *params = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = node_attrs,
.role = pcmk_role_unknown,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe__unpack_dataset_nvpairs(action_xml, XML_TAG_ATTR_SETS,
&rule_data, params, NULL,
FALSE, scheduler);
return params;
}
/*!
* \internal
* \brief Update an action's optional flag
*
* \param[in,out] action Action to update
* \param[in] optional Requested optional status
*/
static void
update_action_optional(pcmk_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
&& !pcmk_is_set(action->flags, pcmk_action_pseudo)
&& !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, pe__node_name(action->node),
action->rsc->id);
pe__set_action_flags(action, pcmk_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
pe__clear_action_flags(action, pcmk_action_optional);
}
}
static enum pe_quorum_policy
effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
enum pe_quorum_policy policy = scheduler->no_quorum_policy;
if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
policy = pcmk_no_quorum_ignore;
} else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) {
switch (rsc->role) {
case pcmk_role_promoted:
case pcmk_role_unpromoted:
if (rsc->next_role > pcmk_role_unpromoted) {
pe__set_next_role(rsc, pcmk_role_unpromoted,
"no-quorum-policy=demote");
}
policy = pcmk_no_quorum_ignore;
break;
default:
policy = pcmk_no_quorum_stop;
break;
}
}
return policy;
}
/*!
* \internal
* \brief Update a resource action's runnable flag
*
* \param[in,out] action Action to update
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
update_resource_action_runnable(pcmk_action_t *action, bool for_graph,
pcmk_scheduler_t *scheduler)
{
if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
pe__clear_action_flags(action, pcmk_action_runnable);
} else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
pe__clear_action_flags(action, pcmk_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"%s on %s is unrunnable (node is offline)",
action->uuid, pe__node_name(action->node));
if (pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& for_graph
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
pe_fence_node(scheduler, action->node, "stop is unrunnable", false);
}
} else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& action->node->details->pending) {
pe__clear_action_flags(action, pcmk_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"Action %s on %s is unrunnable (node is pending)",
action->uuid, pe__node_name(action->node));
} else if (action->needs == pcmk_requires_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
&& !pe_can_fence(scheduler, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
* host is unclean and cannot be fenced.
*/
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pcmk_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, pe__node_name(action->node));
pe__set_action_flags(action, pcmk_action_runnable);
}
} else {
switch (effective_quorum_policy(action->rsc, scheduler)) {
case pcmk_no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
case pcmk_no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
pe__set_action_flags(action, pcmk_action_runnable);
break;
}
}
}
/*!
* \internal
* \brief Update a resource object's flags for a new action on it
*
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] action New action
*/
static void
update_resource_flags_for_action(pcmk_resource_t *rsc,
const pcmk_action_t *action)
{
/* @COMPAT pcmk_rsc_starting and pcmk_rsc_stopping are deprecated and unused
* within Pacemaker, and will eventually be removed
*/
if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pcmk_rsc_stopping);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
pe__set_resource_flags(rsc, pcmk_rsc_starting);
} else {
pe__clear_resource_flags(rsc, pcmk_rsc_starting);
}
}
}
static bool
valid_stop_on_fail(const char *value)
{
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
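/* Illustrative note, following from the helper above: for example,
 * valid_stop_on_fail("block") yields true (allowed for stop), while
 * valid_stop_on_fail("demote") yields false, which is what triggers the
 * reset to the default value in validate_on_fail() below.
 */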
/*!
* \internal
* \brief Validate (and possibly reset) resource action's on_fail meta-attribute
*
* \param[in] rsc Resource that action is for
* \param[in] action_name Action name
* \param[in] action_config Action configuration XML from CIB (if any)
* \param[in,out] meta Table of action meta-attributes
*/
static void
validate_on_fail(const pcmk_resource_t *rsc, const char *action_name,
const xmlNode *action_config, GHashTable *meta)
{
const char *name = NULL;
const char *role = NULL;
const char *interval_spec = NULL;
const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
char *key = NULL;
char *new_value = NULL;
// Stop actions can only use certain on-fail values
if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
"allowed for stop", rsc->id, value);
g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
return;
}
/* Demote actions default on-fail to the on-fail value for the first
* recurring monitor for the promoted role (if any).
*/
if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
&& (value == NULL)) {
/* @TODO This does not consider promote options set in a meta-attribute
* block (which may have rules that need to be evaluated) rather than
* XML properties.
*/
for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
const char *promote_on_fail = NULL;
/* We only care about explicit on-fail (if promote uses default, so
* can demote)
*/
promote_on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
if (promote_on_fail == NULL) {
continue;
}
// We only care about recurring monitors for the promoted role
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
|| !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
continue;
}
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
}
// We only care about enabled monitors
if ((pcmk__xe_get_bool_attr(operation, "enabled",
&enabled) == pcmk_rc_ok) && !enabled) {
continue;
}
// Demote actions can't default to on-fail="demote"
if (pcmk__str_eq(promote_on_fail, "demote", pcmk__str_casei)) {
continue;
}
// Use value from first applicable promote action found
key = strdup(XML_OP_ATTR_ON_FAIL);
new_value = strdup(promote_on_fail);
CRM_ASSERT((key != NULL) && (new_value != NULL));
g_hash_table_insert(meta, key, new_value);
}
return;
}
if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
&& !pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
key = strdup(XML_OP_ATTR_ON_FAIL);
new_value = strdup("ignore");
CRM_ASSERT((key != NULL) && (new_value != NULL));
g_hash_table_insert(meta, key, new_value);
return;
}
// on-fail="demote" is allowed only for certain actions
if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
name = crm_element_value(action_config, "name");
role = crm_element_value(action_config, "role");
interval_spec = crm_element_value(action_config,
XML_LRM_ATTR_INTERVAL);
if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
&& (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
|| !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
PCMK__ROLE_PROMOTED_LEGACY, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
"allowed for it", rsc->id, name);
g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
return;
}
}
}
static int
unpack_timeout(const char *value)
{
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
return timeout_ms;
}
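/* Illustrative note: crm_get_msec() accepts duration specs such as "45s" or
 * "45000ms", so unpack_timeout("45s") would presumably return 45000, while an
 * unparsable or missing value (negative result from crm_get_msec()) falls
 * back to PCMK_DEFAULT_ACTION_TIMEOUT_MS.
 */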
// true if value contains valid, non-NULL interval origin for recurring op
static bool
unpack_interval_origin(const char *value, const xmlNode *xml_obj,
guint interval_ms, const crm_time_t *now,
long long *start_delay)
{
long long result = 0;
guint interval_sec = interval_ms / 1000;
crm_time_t *origin = NULL;
// Ignore unspecified values and non-recurring operations
if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
return false;
}
// Parse interval origin from text
origin = crm_time_new(value);
if (origin == NULL) {
pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
"'%s' because '%s' is not valid",
(ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
return false;
}
// Get seconds since origin (negative if origin is in the future)
result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
crm_time_free(origin);
// Calculate seconds from closest interval to now
result = result % interval_sec;
// Calculate seconds remaining until next interval
result = ((result <= 0)? 0 : interval_sec) - result;
crm_info("Calculated a start delay of %llds for operation '%s'",
result,
(ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
if (start_delay != NULL) {
*start_delay = result * 1000; // milliseconds
}
return true;
}
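/* Illustrative note, a worked example of the interval-origin arithmetic above,
 * assuming a 60-second interval, an interval origin of 08:00:00, and "now" of
 * 08:02:30:
 *   result = 150          (seconds since origin)
 *   result = 150 % 60 = 30 (seconds into the current interval)
 *   result = 60 - 30 = 30  (seconds until the next interval boundary)
 * so *start_delay is set to 30000 milliseconds.
 */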
static int
unpack_start_delay(const char *value, GHashTable *meta)
{
int start_delay = 0;
if (value != NULL) {
start_delay = crm_get_msec(value);
if (start_delay < 0) {
start_delay = 0;
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY),
pcmk__itoa(start_delay));
}
}
return start_delay;
}
/*!
* \internal
* \brief Find a resource's most frequent recurring monitor
*
* \param[in] rsc Resource to check
*
* \return Operation XML configured for most frequent recurring monitor for
* \p rsc (if any)
*/
static xmlNode *
most_frequent_monitor(const pcmk_resource_t *rsc)
{
guint min_interval_ms = G_MAXUINT;
xmlNode *op = NULL;
for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
guint interval_ms = 0;
const char *interval_spec = crm_element_value(operation,
XML_LRM_ATTR_INTERVAL);
// We only care about enabled recurring monitors
if (!pcmk__str_eq(crm_element_value(operation, "name"),
PCMK_ACTION_MONITOR, pcmk__str_none)) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms == 0) {
continue;
}
// @TODO This does not account for rules, defaults, etc.
if ((pcmk__xe_get_bool_attr(operation, "enabled",
&enabled) == pcmk_rc_ok) && !enabled) {
continue;
}
if (interval_ms < min_interval_ms) {
min_interval_ms = interval_ms;
op = operation;
}
}
return op;
}
/*!
* \internal
* \brief Unpack action meta-attributes
*
* \param[in,out] rsc Resource that action is for
* \param[in] node Node that action is on
* \param[in] action_name Action name
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in] action_config Action XML configuration from CIB (if any)
*
* Unpack a resource action's meta-attributes (normalizing the interval,
* timeout, and start delay values as integer milliseconds) from its CIB XML
* configuration (including defaults).
*
* \return Newly allocated hash table with normalized action meta-attributes
*/
GHashTable *
pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *action_name, guint interval_ms,
const xmlNode *action_config)
{
GHashTable *meta = NULL;
char *name = NULL;
char *value = NULL;
const char *timeout_spec = NULL;
const char *str = NULL;
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE),
};
pe_op_eval_data_t op_rule_data = {
.op_name = action_name,
.interval = interval_ms,
};
pe_rule_eval_data_t rule_data = {
.node_hash = (node == NULL)? NULL : node->details->attrs,
.role = pcmk_role_unknown,
.now = rsc->cluster->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = &op_rule_data,
};
meta = pcmk__strkey_table(free, free);
// Cluster-wide <op_defaults> <meta_attributes>
pe__unpack_dataset_nvpairs(rsc->cluster->op_defaults, XML_TAG_META_SETS,
&rule_data, meta, NULL, FALSE, rsc->cluster);
// Derive default timeout for probes from recurring monitor timeouts
if (pcmk_is_probe(action_name, interval_ms)) {
xmlNode *min_interval_mon = most_frequent_monitor(rsc);
if (min_interval_mon != NULL) {
/* @TODO This does not consider timeouts set in meta_attributes
* blocks (which may also have rules that need to be evaluated).
*/
timeout_spec = crm_element_value(min_interval_mon,
XML_ATTR_TIMEOUT);
if (timeout_spec != NULL) {
pe_rsc_trace(rsc,
"Setting default timeout for %s probe to "
"most frequent monitor's timeout '%s'",
rsc->id, timeout_spec);
name = strdup(XML_ATTR_TIMEOUT);
value = strdup(timeout_spec);
CRM_ASSERT((name != NULL) && (value != NULL));
g_hash_table_insert(meta, name, value);
}
}
}
if (action_config != NULL) {
// <op> <meta_attributes> take precedence over defaults
pe__unpack_dataset_nvpairs(action_config, XML_TAG_META_SETS, &rule_data,
meta, NULL, TRUE, rsc->cluster);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
* (See below for the only exception, fence device start/probe timeout.)
*/
for (xmlAttrPtr attr = action_config->properties;
attr != NULL; attr = attr->next) {
name = strdup((const char *) attr->name);
value = strdup(pcmk__xml_attr_value(attr));
CRM_ASSERT((name != NULL) && (value != NULL));
g_hash_table_insert(meta, name, value);
}
}
g_hash_table_remove(meta, XML_ATTR_ID);
// Normalize interval to milliseconds
if (interval_ms > 0) {
name = strdup(XML_LRM_ATTR_INTERVAL);
CRM_ASSERT(name != NULL);
value = crm_strdup_printf("%u", interval_ms);
g_hash_table_insert(meta, name, value);
} else {
g_hash_table_remove(meta, XML_LRM_ATTR_INTERVAL);
}
/* Timeout order of precedence (highest to lowest):
* 1. pcmk_monitor_timeout resource parameter (only for starts and probes
* when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
* monitors via the executor instead)
* 2. timeout configured in <op> (with <op timeout> taking precedence over
* <op> <meta_attributes>)
* 3. timeout configured in <op_defaults> <meta_attributes>
* 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
*/
// Check for pcmk_monitor_timeout
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
&& (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
|| pcmk_is_probe(action_name, interval_ms))) {
GHashTable *params = pe_rsc_params(rsc, node, rsc->cluster);
timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
if (timeout_spec != NULL) {
pe_rsc_trace(rsc,
"Setting timeout for %s %s to "
"pcmk_monitor_timeout (%s)",
rsc->id, action_name, timeout_spec);
name = strdup(XML_ATTR_TIMEOUT);
value = strdup(timeout_spec);
CRM_ASSERT((name != NULL) && (value != NULL));
g_hash_table_insert(meta, name, value);
}
}
// Normalize timeout to positive milliseconds
name = strdup(XML_ATTR_TIMEOUT);
CRM_ASSERT(name != NULL);
timeout_spec = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
g_hash_table_insert(meta, name, pcmk__itoa(unpack_timeout(timeout_spec)));
// Ensure on-fail has a valid value
validate_on_fail(rsc, action_name, action_config, meta);
// Normalize start-delay
str = g_hash_table_lookup(meta, XML_OP_ATTR_START_DELAY);
if (str != NULL) {
unpack_start_delay(str, meta);
} else {
long long start_delay = 0;
str = g_hash_table_lookup(meta, XML_OP_ATTR_ORIGIN);
if (unpack_interval_origin(str, action_config, interval_ms,
rsc->cluster->now, &start_delay)) {
name = strdup(XML_OP_ATTR_START_DELAY);
CRM_ASSERT(name != NULL);
g_hash_table_insert(meta, name,
crm_strdup_printf("%lld", start_delay));
}
}
return meta;
}
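/* Illustrative sketch, assuming rsc, node, and op_xml exist in the caller:
 * unpacking the meta-attributes of a 10-second recurring monitor. The returned
 * table always carries a timeout normalized to milliseconds.
 *
 *     GHashTable *meta = pcmk__unpack_action_meta(rsc, node,
 *                                                 PCMK_ACTION_MONITOR, 10000,
 *                                                 op_xml);
 *     const char *timeout_ms = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
 *     ...
 *     g_hash_table_destroy(meta);
 */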
/*!
* \internal
* \brief Unpack action configuration
*
* Unpack a resource action's meta-attributes (normalizing the interval,
* timeout, and start delay values as integer milliseconds), requirements, and
* failure policy from its CIB XML configuration (including defaults).
*
* \param[in,out] action Resource action to unpack into
* \param[in] xml_obj Action configuration XML (NULL for defaults only)
* \param[in] container Resource that contains affected resource, if any
* \param[in] interval_ms How frequently to perform the operation
*/
static void
unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
const pcmk_resource_t *container, guint interval_ms)
{
const char *value = NULL;
action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
action->task, interval_ms, xml_obj);
if (!pcmk__strcase_any_of(action->task, PCMK_ACTION_START,
PCMK_ACTION_PROMOTE, NULL)) {
action->needs = pcmk_requires_nothing;
value = "nothing (not start or promote)";
} else if (pcmk_is_set(action->rsc->flags, pcmk_rsc_needs_fencing)) {
action->needs = pcmk_requires_fencing;
value = "fencing";
} else if (pcmk_is_set(action->rsc->flags, pcmk_rsc_needs_quorum)) {
action->needs = pcmk_requires_quorum;
value = "quorum";
} else {
action->needs = pcmk_requires_nothing;
value = "nothing";
}
pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
if (value == NULL) {
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_block;
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_fence_node;
value = "node fencing";
if (!pcmk_is_set(action->rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
"operation '%s' to 'stop' because 'fence' is not "
"valid when fencing is disabled", action->uuid);
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_standby_node;
value = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
NULL)) {
action->on_fail = pcmk_on_fail_ignore;
value = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_ban;
value = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_restart;
value = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
if (container) {
action->on_fail = pcmk_on_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_demote;
value = "demote instance";
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = pcmk_on_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* For remote nodes, ensure that any failure that results in dropping an
* active connection to the node results in fencing of the node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does not already
* exist. The user can set op on-fail=fence if they really want to fence start
* failures. */
} else if (((value == NULL)
|| !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed))
&& pe__resource_is_remote_conn(action->rsc)
&& !(pcmk__str_eq(action->task, PCMK_ACTION_MONITOR,
pcmk__str_casei)
&& (interval_ms == 0))
&& !pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
if (!pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)) {
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop unmanaged remote node (enforcing default)";
} else {
if (pcmk_is_set(action->rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
value = "fence remote node (default)";
} else {
value = "recover remote node connection (default)";
}
if (action->rsc->remote_reconnect_ms) {
action->fail_role = pcmk_role_stopped;
}
action->on_fail = pcmk_on_fail_reset_remote;
}
} else if ((value == NULL)
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP,
pcmk__str_casei)) {
if (pcmk_is_set(action->rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
action->on_fail = pcmk_on_fail_fence_node;
value = "resource fence (default)";
} else {
action->on_fail = pcmk_on_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = pcmk_on_fail_restart;
value = "restart (and possibly migrate) (default)";
}
pe_rsc_trace(action->rsc, "%s failure handling: %s",
action->uuid, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
if (value) {
pe_warn_once(pcmk__wo_role_after,
"Support for role_after_failure is deprecated and will be removed in a future release");
}
}
if (value != NULL && action->fail_role == pcmk_role_unknown) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == pcmk_role_unknown) {
if (pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
action->fail_role = pcmk_role_unpromoted;
} else {
action->fail_role = pcmk_role_started;
}
}
pe_rsc_trace(action->rsc, "%s failure results in: %s",
action->uuid, role2text(action->fail_role));
}
/*!
* \brief Create or update an action object
*
* \param[in,out] rsc Resource that action is for (if any)
* \param[in,out] key Action key (must be non-NULL)
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] save_action Whether action should be recorded in transition graph
* \param[in,out] scheduler Scheduler data
*
* \return Action object corresponding to arguments (guaranteed not to be
* \c NULL)
* \note This function takes ownership of (and might free) \p key. If
* \p save_action is true, \p scheduler will own the returned action,
* otherwise it is the caller's responsibility to free the return value
* with pe_free_action().
*/
pcmk_action_t *
custom_action(pcmk_resource_t *rsc, char *key, const char *task,
const pcmk_node_t *on_node, gboolean optional,
gboolean save_action, pcmk_scheduler_t *scheduler)
{
pcmk_action_t *action = NULL;
CRM_ASSERT((key != NULL) && (task != NULL) && (scheduler != NULL));
if (save_action) {
action = find_existing_action(key, rsc, on_node, scheduler);
}
if (action == NULL) {
action = new_action(key, task, rsc, on_node, optional, save_action,
scheduler);
} else {
free(key);
}
update_action_optional(action, optional);
if (rsc != NULL) {
if ((action->node != NULL) && (action->op_entry != NULL)
&& !pcmk_is_set(action->flags, pcmk_action_attrs_evaluated)) {
GHashTable *attrs = action->node->details->attrs;
if (action->extra != NULL) {
g_hash_table_destroy(action->extra);
}
action->extra = pcmk__unpack_action_rsc_params(action->op_entry,
attrs, scheduler);
pe__set_action_flags(action, pcmk_action_attrs_evaluated);
}
update_resource_action_runnable(action, save_action, scheduler);
if (save_action) {
update_resource_flags_for_action(rsc, action);
}
}
if (action->extra == NULL) {
action->extra = pcmk__strkey_table(free, free);
}
return action;
}
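/* Illustrative sketch, assuming rsc, node, and scheduler exist in the caller:
 * requesting a required start action that is recorded in the transition graph.
 * Note that custom_action() takes ownership of the key built here.
 *
 *     pcmk_action_t *start = custom_action(rsc,
 *                                          pcmk__op_key(rsc->id,
 *                                                       PCMK_ACTION_START, 0),
 *                                          PCMK_ACTION_START, node,
 *                                          FALSE, TRUE, scheduler);
 */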
pcmk_action_t *
get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler)
{
pcmk_action_t *op = lookup_singleton(scheduler, name);
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE,
scheduler);
pe__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
}
return op;
}
static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *candidate = gIter->data;
if (candidate->children != NULL) {
matches = find_unfencing_devices(candidate->children, matches);
} else if (!pcmk_is_set(candidate->flags, pcmk_rsc_fence_device)) {
continue;
} else if (pcmk_is_set(candidate->flags, pcmk_rsc_needs_unfencing)) {
matches = g_list_prepend(matches, candidate);
} else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
PCMK_STONITH_PROVIDES),
PCMK__VALUE_UNFENCING,
pcmk__str_casei)) {
matches = g_list_prepend(matches, candidate);
}
}
return matches;
}
static int
node_priority_fencing_delay(const pcmk_node_t *node,
const pcmk_scheduler_t *scheduler)
{
int member_count = 0;
int online_count = 0;
int top_priority = 0;
int lowest_priority = 0;
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
if (scheduler->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
if (node->details->type != pcmk_node_variant_cluster) {
return 0;
}
// No need to request a delay if the fencing target is in our partition
if (node->details->online) {
return 0;
}
for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *n = gIter->data;
if (n->details->type != pcmk_node_variant_cluster) {
continue;
}
member_count ++;
if (n->details->online) {
online_count++;
}
if (member_count == 1
|| n->details->priority > top_priority) {
top_priority = n->details->priority;
}
if (member_count == 1
|| n->details->priority < lowest_priority) {
lowest_priority = n->details->priority;
}
}
// No need to delay if we have more than half of the cluster members
if (online_count > member_count / 2) {
return 0;
}
/* All the nodes have equal priority.
* Any configured corresponding `pcmk_delay_base/max` will be applied. */
if (lowest_priority == top_priority) {
return 0;
}
if (node->details->priority < top_priority) {
return 0;
}
return scheduler->priority_fencing_delay;
}
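/* Illustrative note: with priority-fencing-delay=10s in a three-node cluster
 * where the fencing target is offline, has the highest node priority, and only
 * one of the three members remains online in our partition, every check above
 * passes and the configured 10s delay is returned; in any other case 0 is
 * returned and only a configured pcmk_delay_base/max applies.
 */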
pcmk_action_t *
pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay,
pcmk_scheduler_t *scheduler)
{
char *op_key = NULL;
pcmk_action_t *stonith_op = NULL;
if(op == NULL) {
op = scheduler->stonith_action;
}
op_key = crm_strdup_printf("%s-%s-%s",
PCMK_ACTION_STONITH, node->details->uname, op);
stonith_op = lookup_singleton(scheduler, op_key);
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
TRUE, TRUE, scheduler);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* Extra work to detect device changes
*/
GString *digests_all = g_string_sized_new(1024);
GString *digests_secure = g_string_sized_new(1024);
GList *matches = find_unfencing_devices(scheduler->resources, NULL);
char *key = NULL;
char *value = NULL;
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
data = pe__compare_fencing_digest(match, agent, node,
scheduler);
if (data->rc == pcmk__digest_mismatch) {
optional = FALSE;
crm_notice("Unfencing node %s because the definition of "
"%s changed", pe__node_name(node), match->id);
if (!pcmk__is_daemon && scheduler->priv != NULL) {
pcmk__output_t *out = scheduler->priv;
out->info(out,
"notice: Unfencing node %s because the "
"definition of %s changed",
pe__node_name(node), match->id);
}
}
pcmk__g_strcat(digests_all,
match->id, ":", agent, ":",
data->digest_all_calc, ",", NULL);
pcmk__g_strcat(digests_secure,
match->id, ":", agent, ":",
data->digest_secure_calc, ",", NULL);
}
key = strdup(XML_OP_ATTR_DIGESTS_ALL);
value = strdup((const char *) digests_all->str);
CRM_ASSERT((key != NULL) && (value != NULL));
g_hash_table_insert(stonith_op->meta, key, value);
g_string_free(digests_all, TRUE);
key = strdup(XML_OP_ATTR_DIGESTS_SECURE);
value = strdup((const char *) digests_secure->str);
CRM_ASSERT((key != NULL) && (value != NULL));
g_hash_table_insert(stonith_op->meta, key, value);
g_string_free(digests_secure, TRUE);
}
} else {
free(op_key);
}
if (scheduler->priority_fencing_delay > 0
/* It's a suitable case where `priority-fencing-delay` applies.
* At least add `priority-fencing-delay` field as an indicator. */
&& (priority_delay
/* The priority delay needs to be recalculated if this function has
* been called by schedule_fencing_and_shutdowns() after node
* priority has already been calculated by native_add_running().
*/
|| g_hash_table_lookup(stonith_op->meta,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
/* Add `priority-fencing-delay` to the fencing op even if it's 0 for
* the targeting node. So that it takes precedence over any possible
* `pcmk_delay_base/max`.
*/
char *delay_s = pcmk__itoa(node_priority_fencing_delay(node,
scheduler));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
if(optional == FALSE && pe_can_fence(scheduler, node)) {
pe__clear_action_flags(stonith_op, pcmk_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
stonith_op->reason = strdup(reason);
}
return stonith_op;
}
void
pe_free_action(pcmk_action_t *action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
free(action->cancel_task);
free(action->reason);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
int
pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
pcmk_scheduler_t *scheduler)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
const char *timeout_spec = NULL;
int timeout_ms = 0;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME),
pcmk__str_casei)) {
timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT);
break;
}
}
if (timeout_spec == NULL && scheduler->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(scheduler->op_defaults, XML_TAG_META_SETS,
&rule_data, action_meta, NULL, FALSE,
scheduler);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
// @TODO check meta-attributes
// @TODO maybe use min-interval monitor timeout as default for monitors
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
if (action_meta != NULL) {
g_hash_table_destroy(action_meta);
}
return timeout_ms;
}
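/* Illustrative sketch, assuming rsc and scheduler exist in the caller: looking
 * up the configured start timeout in milliseconds (falling back to
 * PCMK_DEFAULT_ACTION_TIMEOUT_MS when nothing is configured).
 *
 *     int start_timeout_ms = pe_get_configured_timeout(rsc, PCMK_ACTION_START,
 *                                                       scheduler);
 */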
enum action_tasks
get_complex_task(const pcmk_resource_t *rsc, const char *name)
{
enum action_tasks task = text2task(name);
if ((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)) {
switch (task) {
case pcmk_action_stopped:
case pcmk_action_started:
case pcmk_action_demoted:
case pcmk_action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s",
name, rsc->id);
--task;
break;
default:
break;
}
}
return task;
}
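/* Illustrative note: the "--task" above presumably relies on each "-ed" value
 * immediately following its base action in enum action_tasks, so that for a
 * primitive, e.g. pcmk_action_started folds back to pcmk_action_start and
 * pcmk_action_demoted folds back to pcmk_action_demote.
 */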
/*!
* \internal
* \brief Find first matching action in a list
*
* \param[in] input List of actions to search
* \param[in] uuid If not NULL, action must have this UUID
* \param[in] task If not NULL, action must have this action name
* \param[in] on_node If not NULL, action must be on this node
*
* \return First action in list that matches criteria, or NULL if none
*/
pcmk_action_t *
find_first_action(const GList *input, const char *uuid, const char *task,
const pcmk_node_t *on_node)
{
CRM_CHECK(uuid || task, return NULL);
for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
} else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
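/* Illustrative sketch, assuming rsc and node exist in the caller: finding a
 * resource's stop action on a particular node by task name only (no UUID).
 *
 *     pcmk_action_t *stop = find_first_action(rsc->actions, NULL,
 *                                             PCMK_ACTION_STOP, node);
 */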
GList *
find_actions(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, pe__node_name(on_node));
action->node = pe__copy_node(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
result = g_list_prepend(result, action);
}
}
return result;
}
GList *
find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
if (on_node == NULL) {
return NULL;
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
&& pcmk__str_eq(on_node->details->id, action->node->details->id,
pcmk__str_casei)) {
crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
result = g_list_prepend(result, action);
}
}
return result;
}
/*!
* \brief Find all actions of given type for a resource
*
* \param[in] rsc Resource to search
* \param[in] node Find only actions scheduled on this node
* \param[in] task Action name to search for
* \param[in] require_node If TRUE, NULL node or action node will not match
*
* \return List of actions found (or NULL if none)
* \note If node is not NULL and require_node is FALSE, matching actions
* without a node will be assigned to node.
*/
GList *
pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
char *key = pcmk__op_key(rsc->id, task, 0);
if (require_node) {
result = find_actions_exact(rsc->actions, key, node);
} else {
result = find_actions(rsc->actions, key, node);
}
free(key);
return result;
}
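/* Illustrative sketch, assuming rsc and node exist in the caller: collecting a
 * resource's start actions on a node. The actions in the returned list are
 * owned by the scheduler, so only the list itself should be freed.
 *
 *     GList *starts = pe__resource_actions(rsc, node, PCMK_ACTION_START,
 *                                          false);
 *     ...
 *     g_list_free(starts);
 */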
/*!
* \internal
* \brief Create an action reason string based on the action itself
*
* \param[in] action Action to create reason string for
* \param[in] flag Action flag that was cleared
*
* \return Newly allocated string suitable for use as action reason
* \note It is the caller's responsibility to free() the result.
*/
char *
pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
case pcmk_action_runnable:
change = "unrunnable";
break;
case pcmk_action_migratable:
change = "unmigrateable";
break;
case pcmk_action_optional:
change = "required";
break;
default:
// Bug: caller passed unsupported flag
CRM_CHECK(change != NULL, change = "");
break;
}
return crm_strdup_printf("%s%s%s %s", change,
(action->rsc == NULL)? "" : " ",
(action->rsc == NULL)? "" : action->rsc->id,
action->task);
}
void pe_action_set_reason(pcmk_action_t *action, const char *reason,
bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
action->uuid, action->reason, pcmk__s(reason, "(none)"));
} else if (action->reason == NULL) {
pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
action->uuid, pcmk__s(reason, "(none)"));
} else {
// crm_assert(action->reason != NULL && !overwrite);
return;
}
pcmk__str_update(&action->reason, reason);
}
/*!
* \internal
* \brief Create an action to clear a resource's history from CIB
*
* \param[in,out] rsc Resource to clear
* \param[in] node Node to clear history on
*/
void
pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
CRM_ASSERT((rsc != NULL) && (node != NULL));
custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
PCMK_ACTION_LRM_DELETE, node, FALSE, TRUE, rsc->cluster);
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
int
pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
const char *a_node = crm_element_value(xml_a, XML_LRM_ATTR_TARGET);
const char *b_node = crm_element_value(xml_b, XML_LRM_ATTR_TARGET);
bool same_node = true;
/* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via
* 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c).
*
* In case that any of the lrm_rsc_op entries doesn't have on_node
* attribute, we need to explicitly tell whether the two operations are on
* the same node.
*/
if (a_node == NULL || b_node == NULL) {
same_node = same_node_default;
} else {
same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei);
}
if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) {
/* We have duplicate lrm_rsc_op entries in the status
* section which is unlikely to be a good thing
* - we can handle it easily enough, but we need to get
* to the bottom of why it's happening.
*/
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
/* both are pending ops so it doesn't matter since
* stops are never pending
*/
sort_return(0, "pending");
} else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (a_call_id >= 0 && b_call_id >= 0
&& (!same_node || a_call_id == b_call_id)) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
time_t last_a = -1;
time_t last_b = -1;
crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %lld vs %lld",
(long long) last_a, (long long) last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic a");
}
if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (e.g. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
/*
* some of the logic in here may be redundant...
*
* if the UUID from the TE doesn't match then one better
* be a pending operation.
* pending operations don't survive between elections and joins
* because we query the LRM directly
*/
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
return pe__is_newer_op(xml_a, xml_b, true);
}
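/* Illustrative sketch: sort_op_by_callid() has the GCompareFunc signature, so
 * a list of lrm_rsc_op XML entries (assumed here as op_list) can be ordered
 * oldest-to-newest with:
 *
 *     op_list = g_list_sort(op_list, sort_op_by_callid);
 */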
/*!
* \internal
* \brief Create a new pseudo-action for a resource
*
* \param[in,out] rsc Resource to create action for
* \param[in] task Action name
* \param[in] optional Whether action should be considered optional
 * \param[in]     runnable  Whether action should be considered runnable
*
* \return New action object corresponding to arguments
*/
pcmk_action_t *
pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional,
bool runnable)
{
pcmk_action_t *action = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL));
action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
optional, TRUE, rsc->cluster);
pe__set_action_flags(action, pcmk_action_pseudo);
if (runnable) {
pe__set_action_flags(action, pcmk_action_runnable);
}
return action;
}
/*!
* \internal
* \brief Add the expected result to an action
*
* \param[in,out] action Action to add expected result to
* \param[in] expected_result Expected result to add
*
* \note This is more efficient than calling add_hash_param().
*/
void
pe__add_action_expected_result(pcmk_action_t *action, int expected_result)
{
char *name = NULL;
CRM_ASSERT((action != NULL) && (action->meta != NULL));
name = strdup(XML_ATTR_TE_TARGET_RC);
CRM_ASSERT (name != NULL);
g_hash_table_insert(action->meta, name, pcmk__itoa(expected_result));
}