diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 6842be526a..3048e1afa7 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,723 +1,722 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_INTERNAL__H
# define PE_INTERNAL__H
# include <stdint.h>
# include <string.h>
# include <crm/msg_xml.h>
# include <crm/pengine/status.h>
# include <crm/pengine/remote_internal.h>
# include <crm/common/internal.h>
# include <crm/common/options_internal.h>
# include <crm/common/output_internal.h>
const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts);
enum pe__clone_flags {
// Whether instances should be started sequentially
pe__clone_ordered = (1 << 0),
// Whether promotion scores have been added
pe__clone_promotion_added = (1 << 1),
// Whether promotion constraints have been added
pe__clone_promotion_constrained = (1 << 2),
};
bool pe__clone_is_ordered(const pe_resource_t *clone);
int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag);
enum pe__group_flags {
pe__group_ordered = (1 << 0), // Members start sequentially
pe__group_colocated = (1 << 1), // Members must be on same node
};
bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags);
pe_resource_t *pe__last_group_member(const pe_resource_t *group);
# define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_err(fmt...) do { \
was_processing_error = TRUE; \
pcmk__config_err(fmt); \
} while (0)
# define pe_warn(fmt...) do { \
was_processing_warning = TRUE; \
pcmk__config_warn(fmt); \
} while (0)
# define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
# define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
#define pe__set_working_set_flags(working_set, flags_to_set) do { \
(working_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_working_set_flags(working_set, flags_to_clear) do { \
(working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_resource_flags(resource, flags_to_set) do { \
(resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_resource_flags(resource, flags_to_clear) do { \
(resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_action_flags(action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags(action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
action_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action", action_name, \
(action_flags), \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
action_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", action_name, \
(action_flags), \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_action_flags_as(function, line, action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_order_flags(order_flags, flags_to_set) do { \
order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_order_flags(order_flags, flags_to_clear) do { \
order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_clear), \
#flags_to_clear); \
} while (0)
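/* Illustrative sketch, not part of this patch: the flag wrappers above behave
 * like plain bitwise updates but also log the change at trace level with the
 * caller's function and line. A hypothetical call site, assuming a
 * pe_resource_t *rsc obtained from the working set, might look like:
 *
 *     pe__set_resource_flags(rsc, pe_rsc_provisional);
 *     if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
 *         pe__clear_resource_flags(rsc, pe_rsc_provisional);
 *     }
 */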
// Some warnings we don't want to print every transition
enum pe_warn_once_e {
pe_wo_blind = (1 << 0),
pe_wo_restart_type = (1 << 1),
pe_wo_role_after = (1 << 2),
pe_wo_poweroff = (1 << 3),
pe_wo_require_all = (1 << 4),
pe_wo_order_score = (1 << 5),
pe_wo_neg_threshold = (1 << 6),
pe_wo_remove_after = (1 << 7),
pe_wo_ping_node = (1 << 8),
pe_wo_order_inst = (1 << 9),
pe_wo_coloc_inst = (1 << 10),
pe_wo_group_order = (1 << 11),
pe_wo_group_coloc = (1 << 12),
};
extern uint32_t pe_wo;
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (!pcmk_is_set(pe_wo, pe_wo_bit)) { \
if (pe_wo_bit == pe_wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Warn-once", "logging", pe_wo, \
(pe_wo_bit), #pe_wo_bit); \
} \
} while (0);
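/* Illustrative sketch, not part of this patch: pe_warn_once() emits a given
 * warning only the first time its pe_wo_* bit is seen, so repeated transitions
 * do not flood the log. The message text below is hypothetical:
 *
 *     pe_warn_once(pe_wo_order_score,
 *                  "Support for scores in ordering constraints is deprecated");
 */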
typedef struct pe__location_constraint_s {
char *id; // Constraint XML ID
pe_resource_t *rsc_lh; // Resource being located
enum rsc_role_e role_filter; // Role to locate
enum pe_discover_e discover_mode; // Resource discovery
GList *node_list_rh; // List of pe_node_t*
} pe__location_t;
typedef struct pe__order_constraint_s {
int id;
uint32_t flags; // Group of enum pe_ordering flags
void *lh_opaque;
pe_resource_t *lh_rsc;
pe_action_t *lh_action;
char *lh_action_task;
void *rh_opaque;
pe_resource_t *rh_rsc;
pe_action_t *rh_action;
char *rh_action_task;
} pe__ordering_t;
const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc,
bool include_bundle);
int pe__clone_max(const pe_resource_t *clone);
int pe__clone_node_max(const pe_resource_t *clone);
int pe__clone_promoted_max(const pe_resource_t *clone);
int pe__clone_promoted_node_max(const pe_resource_t *clone);
void pe__create_clone_notifications(pe_resource_t *clone);
void pe__free_clone_notification_data(pe_resource_t *clone);
void pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
pe_action_t *start, pe_action_t *started,
pe_action_t *stop, pe_action_t *stopped);
pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
bool optional, bool runnable);
void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
bool any_demoting);
bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
void add_hash_param(GHashTable * hash, const char *name, const char *value);
char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
pe_working_set_t * data_set);
pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
void pe_metadata(pcmk__output_t *out);
void verify_pe_options(GHashTable * options);
-void common_update_score(pe_resource_t * rsc, const char *id, int score);
void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed);
gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
int flags);
gboolean native_active(pe_resource_t * rsc, gboolean all);
gboolean group_active(pe_resource_t * rsc, gboolean all);
gboolean clone_active(pe_resource_t * rsc, gboolean all);
gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
//! \deprecated This function will be removed in a future release
void native_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void group_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void clone_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
const pe_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...);
char *pe__node_display_name(pe_node_t *node, bool print_detail);
// Clone notifications (pe_notif.c)
void pe__order_notifs_after_fencing(const pe_action_t *action,
pe_resource_t *rsc,
pe_action_t *stonith_op);
static inline const char *
pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag)
{
return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
}
int pe__clone_xml(pcmk__output_t *out, va_list args);
int pe__clone_default(pcmk__output_t *out, va_list args);
int pe__group_xml(pcmk__output_t *out, va_list args);
int pe__group_default(pcmk__output_t *out, va_list args);
int pe__bundle_xml(pcmk__output_t *out, va_list args);
int pe__bundle_html(pcmk__output_t *out, va_list args);
int pe__bundle_text(pcmk__output_t *out, va_list args);
int pe__node_html(pcmk__output_t *out, va_list args);
int pe__node_text(pcmk__output_t *out, va_list args);
int pe__node_xml(pcmk__output_t *out, va_list args);
int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
void native_free(pe_resource_t * rsc);
void group_free(pe_resource_t * rsc);
void clone_free(pe_resource_t * rsc);
void pe__free_bundle(pe_resource_t *rsc);
enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
gboolean current);
void pe__count_common(pe_resource_t *rsc);
void pe__count_bundle(pe_resource_t *rsc);
void common_free(pe_resource_t * rsc);
pe_node_t *pe__copy_node(const pe_node_t *this_node);
extern time_t get_effective_time(pe_working_set_t * data_set);
/* Failure handling utilities (from failcounts.c) */
// bit flags for fail count handling options
enum pe_fc_flags_e {
pe_fc_default = (1 << 0),
pe_fc_effective = (1 << 1), // don't count expired failures
pe_fc_fillers = (1 << 2), // if container, include filler failures in count
};
int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
time_t *last_failure, uint32_t flags,
const xmlNode *xml_op);
pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
const char *reason,
pe_working_set_t *data_set);
/* Functions for finding/counting a resource's active nodes */
bool pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
pe_node_t **active, unsigned int *count_all,
unsigned int *count_clean);
pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
unsigned int *count);
static inline pe_node_t *
pe__current_node(const pe_resource_t *rsc)
{
return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL);
}
/* Binary like operators for lists of nodes */
extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores);
GHashTable *pe__node_list2table(const GList *list);
static inline gpointer
pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
{
if (hash) {
return g_hash_table_lookup(hash, key);
}
return NULL;
}
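/* Illustrative sketch, not part of this patch: pe_hash_table_lookup() is a
 * NULL-safe wrapper around g_hash_table_lookup(). A typical use is looking up
 * a node in a resource's allowed_nodes table by node ID:
 *
 *     pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
 *                                               node->details->id);
 *     if ((allowed != NULL) && (allowed->weight >= 0)) {
 *         // rsc is allowed on this node
 *     }
 */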
extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
void pe__show_node_weights_as(const char *file, const char *function,
int line, bool to_log, const pe_resource_t *rsc,
const char *comment, GHashTable *nodes,
pe_working_set_t *data_set);
#define pe__show_node_weights(level, rsc, text, nodes, data_set) \
pe__show_node_weights_as(__FILE__, __func__, __LINE__, \
(level), (rsc), (text), (nodes), (data_set))
xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key);
pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task,
const pe_node_t *on_node, gboolean optional,
gboolean foo, pe_working_set_t *data_set);
# define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
# define delete_action(rsc, node, optional) custom_action( \
rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \
optional, TRUE, rsc->cluster);
# define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
# define stopped_action(rsc, node, optional) custom_action( \
rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \
optional, TRUE, rsc->cluster);
# define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
# define stop_action(rsc, node, optional) custom_action( \
rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \
optional, TRUE, rsc->cluster);
# define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0)
# define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
# define start_action(rsc, node, optional) custom_action( \
rsc, start_key(rsc), CRMD_ACTION_START, node, \
optional, TRUE, rsc->cluster)
# define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
# define started_action(rsc, node, optional) custom_action( \
rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \
optional, TRUE, rsc->cluster)
# define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
# define promote_action(rsc, node, optional) custom_action( \
rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \
optional, TRUE, rsc->cluster)
# define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
# define promoted_action(rsc, node, optional) custom_action( \
rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \
optional, TRUE, rsc->cluster)
# define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
# define demote_action(rsc, node, optional) custom_action( \
rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \
optional, TRUE, rsc->cluster)
# define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
# define demoted_action(rsc, node, optional) custom_action( \
rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \
optional, TRUE, rsc->cluster)
extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
pe_working_set_t *data_set);
pe_action_t *find_first_action(const GList *input, const char *uuid,
const char *task, const pe_node_t *on_node);
enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name);
extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
const pe_node_t *on_node);
GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node);
extern void pe_free_action(pe_action_t * action);
void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
const char *tag, pe_working_set_t *data_set);
extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role);
void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
const char *why);
pe_resource_t *find_clone_instance(const pe_resource_t *rsc,
const char *sub_id);
extern void destroy_ticket(gpointer data);
extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
// Functions for manipulating resource names
const char *pe_base_name_end(const char *id);
char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
pe_base_name_eq(const pe_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
}
return false;
}
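/* Illustrative sketch, not part of this patch, using a hypothetical clone
 * instance ID "myrsc:2": pe_base_name_end() returns a pointer to the last
 * character of "myrsc", clone_strip() returns a newly allocated "myrsc",
 * clone_zero() returns "myrsc:0", and pe_base_name_eq(rsc, "myrsc") returns
 * true for that instance.
 */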
int pe__target_rc_from_xml(const xmlNode *xml_op);
gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any);
enum rsc_digest_cmp_val {
/*! Digests are the same */
RSC_DIGEST_MATCH = 0,
/*! Params that require a restart changed */
RSC_DIGEST_RESTART,
/*! Some parameter changed. */
RSC_DIGEST_ALL,
/*! rsc op didn't have a digest associated with it, so
* it is unknown if parameters changed or not. */
RSC_DIGEST_UNKNOWN,
};
typedef struct op_digest_cache_s {
enum rsc_digest_cmp_val rc;
xmlNode *params_all;
xmlNode *params_secure;
xmlNode *params_restart;
char *digest_all_calc;
char *digest_secure_calc;
char *digest_restart_calc;
} op_digest_cache_t;
op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
guint *interval_ms,
const pe_node_t *node,
const xmlNode *xml_op,
GHashTable *overrides,
bool calc_secure,
pe_working_set_t *data_set);
void pe__free_digests(gpointer ptr);
op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc,
const xmlNode *xml_op,
pe_node_t *node,
pe_working_set_t *data_set);
pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay,
pe_working_set_t *data_set);
void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node,
const char *reason, pe_action_t *dependency,
pe_working_set_t *data_set);
char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag);
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
void pe__add_action_expected_result(pe_action_t *action, int expected_result);
void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);
//! \deprecated This function will be removed in a future release
void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
void * print_data, gboolean print_all);
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pe_working_set_t * data_set);
//! \deprecated This function will be removed in a future release
void common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
const pe_node_t *node, long options, void *print_data);
int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
const char *name, const pe_node_t *node,
unsigned int options);
int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
const char *name, const pe_node_t *node,
unsigned int options);
GList *pe__bundle_containers(const pe_resource_t *bundle);
int pe__bundle_max(const pe_resource_t *rsc);
int pe__bundle_max_per_node(const pe_resource_t *rsc);
pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
const pe_node_t *node);
bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
pe_working_set_t *data_set,
xmlNode *xml, const char *field);
const char *pe_node_attribute_calculated(const pe_node_t *node,
const char *name,
const pe_resource_t *rsc);
const char *pe_node_attribute_raw(const pe_node_t *node, const char *name);
bool pe__is_universal_clone(const pe_resource_t *rsc,
const pe_working_set_t *data_set);
void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
pe_node_t *node, enum pe_check_parameters,
pe_working_set_t *data_set);
void pe__foreach_param_check(pe_working_set_t *data_set,
void (*cb)(pe_resource_t*, pe_node_t*,
const xmlNode*,
enum pe_check_parameters));
void pe__free_param_checks(pe_working_set_t *data_set);
bool pe__shutdown_requested(const pe_node_t *node);
void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
/*!
* \internal
* \brief Register xml formatting message functions.
*
* \param[in,out] out Output object to register messages with
*/
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
gboolean overwrite, pe_working_set_t *data_set);
bool pe__resource_is_disabled(const pe_resource_t *rsc);
pe_action_t *pe__clear_resource_history(pe_resource_t *rsc,
const pe_node_t *node,
pe_working_set_t *data_set);
GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
bool pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node);
bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s);
GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s);
bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name);
const char *pe__clone_child_id(const pe_resource_t *rsc);
int pe__sum_node_health_scores(const pe_node_t *node, int base_health);
int pe__node_health(pe_node_t *node);
static inline enum pcmk__health_strategy
pe__health_strategy(pe_working_set_t *data_set)
{
return pcmk__parse_health_strategy(pe_pref(data_set->config_hash,
PCMK__OPT_NODE_HEALTH_STRATEGY));
}
static inline int
pe__health_score(const char *option, pe_working_set_t *data_set)
{
return char2score(pe_pref(data_set->config_hash, option));
}
/*!
* \internal
* \brief Return a string suitable for logging as a node name
*
* \param[in] node Node to return a node name string for
*
* \return Node name if available, otherwise node ID if available,
* otherwise "unspecified node" if node is NULL or "unidentified node"
* if node has neither a name nor ID.
*/
static inline const char *
pe__node_name(const pe_node_t *node)
{
if (node == NULL) {
return "unspecified node";
} else if (node->details->uname != NULL) {
return node->details->uname;
} else if (node->details->id != NULL) {
return node->details->id;
} else {
return "unidentified node";
}
}
/*!
* \internal
* \brief Check whether two node objects refer to the same node
*
* \param[in] node1 First node object to compare
* \param[in] node2 Second node object to compare
*
* \return true if \p node1 and \p node2 refer to the same node
*/
static inline bool
pe__same_node(const pe_node_t *node1, const pe_node_t *node2)
{
return (node1 != NULL) && (node2 != NULL)
&& (node1->details == node2->details);
}
/*!
* \internal
* \brief Get the operation key from an action history entry
*
* \param[in] xml Action history entry
*
* \return Entry's operation key
*/
static inline const char *
pe__xe_history_key(const xmlNode *xml)
{
if (xml == NULL) {
return NULL;
} else {
/* @COMPAT Pacemaker <= 1.1.5 did not add the key, and used the ID
* instead. Checking for that allows us to process old saved CIBs,
* including some regression tests.
*/
const char *key = crm_element_value(xml, XML_LRM_ATTR_TASK_KEY);
return pcmk__str_empty(key)? ID(xml) : key;
}
}
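/* Illustrative sketch, not part of this patch: callers can use
 * pe__xe_history_key() to identify an action history entry whether or not the
 * recording Pacemaker version added an operation key:
 *
 *     const char *key = pe__xe_history_key(xml_op);
 *     crm_trace("Processing history entry %s", key);
 */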
#endif
diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index 167c00b561..8e32e1fba1 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -1,1637 +1,1653 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
/* This file is intended for code usable with both clone instances and bundle
* replica containers.
*/
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Check whether a clone or bundle has instances for all available nodes
*
* \param[in] collective Clone or bundle to check
*
* \return true if \p collective has enough instances for all of its available
* allowed nodes, otherwise false
*/
static bool
can_run_everywhere(const pe_resource_t *collective)
{
GHashTableIter iter;
pe_node_t *node = NULL;
int available_nodes = 0;
int max_instances = 0;
switch (collective->variant) {
case pe_clone:
max_instances = pe__clone_max(collective);
break;
case pe_container:
max_instances = pe__bundle_max(collective);
break;
default:
return false; // Not actually possible
}
g_hash_table_iter_init(&iter, collective->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
if (pcmk__node_available(node, false, false)
&& (max_instances < ++available_nodes)) {
return false;
}
}
return true;
}
/*!
* \internal
* \brief Check whether a node is allowed to run an instance
*
* \param[in] instance Clone instance or bundle container to check
* \param[in] node Node to check
* \param[in] max_per_node Maximum number of instances allowed to run on a node
*
* \return true if \p node is allowed to run \p instance, otherwise false
*/
static bool
can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
int max_per_node)
{
pe_node_t *allowed_node = NULL;
if (pcmk_is_set(instance->flags, pe_rsc_orphan)) {
pe_rsc_trace(instance, "%s cannot run on %s: orphaned",
instance->id, pe__node_name(node));
return false;
}
if (!pcmk__node_available(node, false, false)) {
pe_rsc_trace(instance,
"%s cannot run on %s: node cannot run resources",
instance->id, pe__node_name(node));
return false;
}
allowed_node = pcmk__top_allowed_node(instance, node);
if (allowed_node == NULL) {
crm_warn("%s cannot run on %s: node not allowed",
instance->id, pe__node_name(node));
return false;
}
if (allowed_node->weight < 0) {
pe_rsc_trace(instance, "%s cannot run on %s: parent score is %s there",
instance->id, pe__node_name(node),
pcmk_readable_score(allowed_node->weight));
return false;
}
if (allowed_node->count >= max_per_node) {
pe_rsc_trace(instance,
"%s cannot run on %s: node already has %d instance%s",
instance->id, pe__node_name(node), max_per_node,
pcmk__plural_s(max_per_node));
return false;
}
pe_rsc_trace(instance, "%s can run on %s (%d already running)",
instance->id, pe__node_name(node), allowed_node->count);
return true;
}
/*!
* \internal
* \brief Ban a clone instance or bundle replica from unavailable allowed nodes
*
* \param[in,out] instance Clone instance or bundle replica to ban
* \param[in] max_per_node Maximum instances allowed to run on a node
*/
static void
ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
{
if (instance->allowed_nodes != NULL) {
GHashTableIter iter;
- const pe_node_t *allowed_node = NULL;
+ pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, instance->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (void **) &allowed_node)) {
- if (!can_run_instance(instance, allowed_node, max_per_node)) {
- // Ban instance (and all its children) from node
- common_update_score(instance, allowed_node->details->id,
- -INFINITY);
+ while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
+ if (!can_run_instance(instance, node, max_per_node)) {
+ pe_rsc_trace(instance, "Banning %s from unavailable node %s",
+ instance->id, pe__node_name(node));
+ node->weight = -INFINITY;
+ for (GList *child_iter = instance->children;
+ child_iter != NULL; child_iter = child_iter->next) {
+ pe_resource_t *child = (pe_resource_t *) child_iter->data;
+ pe_node_t *child_node = NULL;
+
+ child_node = pe_hash_table_lookup(child->allowed_nodes,
+ node->details->id);
+ if (child_node != NULL) {
+ pe_rsc_trace(instance,
+ "Banning %s child %s "
+ "from unavailable node %s",
+ instance->id, child->id,
+ pe__node_name(node));
+ child_node->weight = -INFINITY;
+ }
+ }
}
}
}
}
/*!
* \internal
* \brief Create a hash table with a single node in it
*
* \param[in] node Node to copy into new table
*
* \return Newly created hash table containing a copy of \p node
* \note The caller is responsible for freeing the result with
* g_hash_table_destroy().
*/
static GHashTable *
new_node_table(pe_node_t *node)
{
GHashTable *table = pcmk__strkey_table(NULL, free);
node = pe__copy_node(node);
g_hash_table_insert(table, (gpointer) node->details->id, node);
return table;
}
/*!
* \internal
* \brief Apply a resource's parent's colocation scores to a node table
*
* \param[in] rsc Resource whose colocations should be applied
* \param[in,out] nodes Node table to apply colocations to
*/
static void
apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
{
GList *iter = NULL;
pcmk__colocation_t *colocation = NULL;
/* Because the this_with_colocations() and with_this_colocations() methods
* boil down to copies of rsc_cons and rsc_cons_lhs for clones and bundles,
* we can use those here directly for efficiency.
*/
for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
colocation = (pcmk__colocation_t *) iter->data;
pcmk__add_colocated_node_scores(colocation->primary, rsc->id, nodes,
colocation->node_attribute,
colocation->score / (float) INFINITY,
pcmk__coloc_select_default);
}
for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
colocation = (pcmk__colocation_t *) iter->data;
if (!pcmk__colocation_has_influence(colocation, rsc)) {
continue;
}
pcmk__add_colocated_node_scores(colocation->dependent, rsc->id, nodes,
colocation->node_attribute,
colocation->score / (float) INFINITY,
pcmk__coloc_select_nonnegative);
}
}
/*!
* \internal
* \brief Compare clone or bundle instances based on colocation scores
*
* Determine the relative order in which two clone or bundle instances should be
* assigned to nodes, considering the scores of colocation constraints directly
* or indirectly involving them.
*
* \param[in] instance1 First instance to compare
* \param[in] instance2 Second instance to compare
*
* \return A negative number if \p instance1 should be assigned first,
* a positive number if \p instance2 should be assigned first,
* or 0 if assignment order doesn't matter
*/
static int
cmp_instance_by_colocation(const pe_resource_t *instance1,
const pe_resource_t *instance2)
{
int rc = 0;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
pe_node_t *current_node1 = pe__current_node(instance1);
pe_node_t *current_node2 = pe__current_node(instance2);
GHashTable *colocated_scores1 = NULL;
GHashTable *colocated_scores2 = NULL;
CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
&& (instance2 != NULL) && (instance2->parent != NULL)
&& (current_node1 != NULL) && (current_node2 != NULL));
// Create node tables initialized with each node
colocated_scores1 = new_node_table(current_node1);
colocated_scores2 = new_node_table(current_node2);
// Apply parental colocations
apply_parent_colocations(instance1, &colocated_scores1);
apply_parent_colocations(instance2, &colocated_scores2);
// Find original nodes again, with scores updated for colocations
node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
// Compare nodes by updated scores
if (node1->weight < node2->weight) {
crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
instance1->id, node1->weight, pe__node_name(node1),
instance2->id, node2->weight, pe__node_name(node2));
rc = 1;
} else if (node1->weight > node2->weight) {
crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
instance1->id, node1->weight, pe__node_name(node1),
instance2->id, node2->weight, pe__node_name(node2));
rc = -1;
}
g_hash_table_destroy(colocated_scores1);
g_hash_table_destroy(colocated_scores2);
return rc;
}
/*!
* \internal
* \brief Check whether a resource or any of its children are failed
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc or any of its children are failed, otherwise false
*/
static bool
did_fail(const pe_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
if (did_fail((const pe_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a node is allowed to run a resource
*
* \param[in] rsc Resource to check
* \param[in,out] node Node to check (will be set NULL if not allowed)
*
* \return true if *node is either NULL or allowed for \p rsc, otherwise false
*/
static bool
node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
{
if (*node != NULL) {
pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
(*node)->details->id);
if ((allowed == NULL) || (allowed->weight < 0)) {
pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
rsc->id, pe__node_name(*node));
*node = NULL;
return false;
}
}
return true;
}
/*!
* \internal
* \brief Compare two clone or bundle instances' instance numbers
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a's instance number is lower,
* a positive number if \p b's instance number is lower,
* or 0 if their instance numbers are the same
*/
gint
pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
{
const pe_resource_t *instance1 = (const pe_resource_t *) a;
const pe_resource_t *instance2 = (const pe_resource_t *) b;
char *div1 = NULL;
char *div2 = NULL;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
// Clone numbers are after a colon, bundle numbers after a dash
div1 = strrchr(instance1->id, ':');
if (div1 == NULL) {
div1 = strrchr(instance1->id, '-');
}
div2 = strrchr(instance2->id, ':');
if (div2 == NULL) {
div2 = strrchr(instance2->id, '-');
}
CRM_ASSERT((div1 != NULL) && (div2 != NULL));
return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
}
/*!
* \internal
* \brief Compare clone or bundle instances according to assignment order
*
* Compare two clone or bundle instances according to the order they should be
* assigned to nodes, preferring (in order):
*
* - Active instance that is less multiply active
* - Instance that is not active on a disallowed node
* - Instance with higher configured priority
* - Active instance whose current node can run resources
* - Active instance whose parent is allowed on current node
* - Active instance whose current node has fewer other instances
* - Active instance
* - Instance that isn't failed
* - Instance whose colocations result in higher score on current node
* - Instance with lower ID in lexicographic order
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a should be assigned first,
* a positive number if \p b should be assigned first,
* or 0 if assignment order doesn't matter
*/
gint
pcmk__cmp_instance(gconstpointer a, gconstpointer b)
{
int rc = 0;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
unsigned int nnodes1 = 0;
unsigned int nnodes2 = 0;
bool can1 = true;
bool can2 = true;
const pe_resource_t *instance1 = (const pe_resource_t *) a;
const pe_resource_t *instance2 = (const pe_resource_t *) b;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
node1 = instance1->fns->active_node(instance1, &nnodes1, NULL);
node2 = instance2->fns->active_node(instance2, &nnodes2, NULL);
/* If both instances are running and at least one is multiply
* active, prefer instance that's running on fewer nodes.
*/
if ((nnodes1 > 0) && (nnodes2 > 0)) {
if (nnodes1 < nnodes2) {
crm_trace("Assign %s (active on %d) before %s (active on %d): "
"less multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return -1;
} else if (nnodes1 > nnodes2) {
crm_trace("Assign %s (active on %d) after %s (active on %d): "
"more multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return 1;
}
}
/* An instance that is either inactive or active on an allowed node is
* preferred over an instance that is active on a no-longer-allowed node.
*/
can1 = node_is_allowed(instance1, &node1);
can2 = node_is_allowed(instance2, &node2);
if (can1 && !can2) {
crm_trace("Assign %s before %s: not active on a disallowed node",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: active on a disallowed node",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher configured priority
if (instance1->priority > instance2->priority) {
crm_trace("Assign %s before %s: priority (%d > %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return -1;
} else if (instance1->priority < instance2->priority) {
crm_trace("Assign %s after %s: priority (%d < %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return 1;
}
// Prefer active instance
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: inactive",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node can run resources
can1 = pcmk__node_available(node1, false, false);
can2 = pcmk__node_available(node2, false, false);
if (can1 && !can2) {
crm_trace("Assign %s before %s: current node can run resources",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: current node can't run resources",
instance1->id, instance2->id);
return 1;
}
// Prefer instance whose parent is allowed to run on instance's current node
node1 = pcmk__top_allowed_node(instance1, node1);
node2 = pcmk__top_allowed_node(instance2, node2);
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: "
"parent not allowed on either instance's current node",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: parent not allowed on current node",
instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: parent allowed on current node",
instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node is running fewer other instances
if (node1->count < node2->count) {
crm_trace("Assign %s before %s: fewer active instances on current node",
instance1->id, instance2->id);
return -1;
} else if (node1->count > node2->count) {
crm_trace("Assign %s after %s: more active instances on current node",
instance1->id, instance2->id);
return 1;
}
// Prefer instance that isn't failed
can1 = did_fail(instance1);
can2 = did_fail(instance2);
if (!can1 && can2) {
crm_trace("Assign %s before %s: not failed",
instance1->id, instance2->id);
return -1;
} else if (can1 && !can2) {
crm_trace("Assign %s after %s: failed",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher cumulative colocation score on current node
rc = cmp_instance_by_colocation(instance1, instance2);
if (rc != 0) {
return rc;
}
// Prefer instance with lower instance number
rc = pcmk__cmp_instance_number(instance1, instance2);
if (rc < 0) {
crm_trace("Assign %s before %s: instance number",
instance1->id, instance2->id);
} else if (rc > 0) {
crm_trace("Assign %s after %s: instance number",
instance1->id, instance2->id);
} else {
crm_trace("No assignment preference for %s vs. %s",
instance1->id, instance2->id);
}
return rc;
}
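/* Illustrative sketch, not part of this patch: both comparators above have
 * GCompareFunc-compatible signatures, so a caller can sort a clone's instance
 * list directly, for example:
 *
 *     clone->children = g_list_sort(clone->children, pcmk__cmp_instance);
 */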
/*!
* \internal
* \brief Choose a node for an instance
*
* \param[in,out] instance Clone instance or bundle replica container
* \param[in] prefer If not NULL, attempt early assignment to this
* node, if still the best choice; otherwise,
* perform final assignment
* \param[in] max_per_node Assign at most this many instances to one node
*
* \return true if \p instance could be assigned to a node, otherwise false
*/
static bool
assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
int max_per_node)
{
pe_node_t *chosen = NULL;
pe_node_t *allowed = NULL;
CRM_ASSERT(instance != NULL);
pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
((prefer == NULL)? "no node" : prefer->details->uname));
if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
// Instance is already assigned
return instance->fns->location(instance, NULL, FALSE) != NULL;
}
if (pcmk_is_set(instance->flags, pe_rsc_allocating)) {
pe_rsc_debug(instance,
"Assignment loop detected involving %s colocations",
instance->id);
return false;
}
if (prefer != NULL) { // Possible early assignment to preferred node
// Get preferred node with instance's scores
allowed = g_hash_table_lookup(instance->allowed_nodes,
prefer->details->id);
if ((allowed == NULL) || (allowed->weight < 0)) {
pe_rsc_trace(instance,
"Not assigning %s to preferred node %s: unavailable",
instance->id, pe__node_name(prefer));
return false;
}
}
ban_unavailable_allowed_nodes(instance, max_per_node);
if (prefer == NULL) { // Final assignment
chosen = instance->cmds->assign(instance, NULL);
} else { // Possible early assignment to preferred node
GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes);
chosen = instance->cmds->assign(instance, prefer);
// Revert nodes if preferred node won't be assigned
if ((chosen != NULL) && (chosen->details != prefer->details)) {
crm_info("Not assigning %s to preferred node %s: %s is better",
instance->id, pe__node_name(prefer),
pe__node_name(chosen));
g_hash_table_destroy(instance->allowed_nodes);
instance->allowed_nodes = backup;
pcmk__unassign_resource(instance);
chosen = NULL;
} else if (backup != NULL) {
g_hash_table_destroy(backup);
}
}
// The parent tracks how many instances have been assigned to each node
if (chosen != NULL) {
allowed = pcmk__top_allowed_node(instance, chosen);
if (allowed == NULL) {
/* The instance is allowed on the node, but its parent isn't. This
* shouldn't be possible if the resource is managed, and we won't be
* able to limit the number of instances assigned to the node.
*/
CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed));
} else {
allowed->count++;
}
}
return chosen != NULL;
}
/*!
* \internal
* \brief Reset the node counts of a resource's allowed nodes to zero
*
* \param[in,out] rsc Resource to reset
*
* \return Number of nodes that are available to run resources
*/
static unsigned int
reset_allowed_node_counts(pe_resource_t *rsc)
{
unsigned int available_nodes = 0;
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
node->count = 0;
if (pcmk__node_available(node, false, false)) {
available_nodes++;
}
}
return available_nodes;
}
/*!
* \internal
* \brief Check whether an instance has a preferred node
*
* \param[in] rsc Clone or bundle being assigned (for logs only)
* \param[in] instance Clone instance or bundle replica container
* \param[in] optimal_per_node Optimal number of instances per node
*
* \return Instance's current node if still available, otherwise NULL
*/
static const pe_node_t *
preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
int optimal_per_node)
{
const pe_node_t *node = NULL;
const pe_node_t *parent_node = NULL;
// Check whether instance is active, healthy, and not yet assigned
if ((instance->running_on == NULL)
|| !pcmk_is_set(instance->flags, pe_rsc_provisional)
|| pcmk_is_set(instance->flags, pe_rsc_failed)) {
return NULL;
}
// Check whether instance's current node can run resources
node = pe__current_node(instance);
if (!pcmk__node_available(node, true, false)) {
pe_rsc_trace(rsc, "Not assigning %s to %s early (unavailable)",
instance->id, pe__node_name(node));
return NULL;
}
// Check whether node already has optimal number of instances assigned
parent_node = pcmk__top_allowed_node(instance, node);
if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
pe_rsc_trace(rsc,
"Not assigning %s to %s early "
"(optimal instances already assigned)",
instance->id, pe__node_name(node));
return NULL;
}
return node;
}
/*!
* \internal
* \brief Assign collective instances to nodes
*
* \param[in,out] collective Clone or bundle resource being assigned
* \param[in,out] instances List of clone instances or bundle containers
* \param[in] max_total Maximum instances to assign in total
* \param[in] max_per_node Maximum instances to assign to any one node
*/
void
pcmk__assign_instances(pe_resource_t *collective, GList *instances,
int max_total, int max_per_node)
{
// Reuse node count to track number of assigned instances
unsigned int available_nodes = reset_allowed_node_counts(collective);
int optimal_per_node = 0;
int assigned = 0;
GList *iter = NULL;
pe_resource_t *instance = NULL;
const pe_node_t *current = NULL;
if (available_nodes > 0) {
optimal_per_node = max_total / available_nodes;
}
if (optimal_per_node < 1) {
optimal_per_node = 1;
}
pe_rsc_debug(collective,
"Assigning up to %d %s instance%s to up to %u node%s "
"(at most %d per host, %d optimal)",
max_total, collective->id, pcmk__plural_s(max_total),
available_nodes, pcmk__plural_s(available_nodes),
max_per_node, optimal_per_node);
// Assign as many instances as possible to their current location
for (iter = instances; (iter != NULL) && (assigned < max_total);
iter = iter->next) {
instance = (pe_resource_t *) iter->data;
current = preferred_node(collective, instance, optimal_per_node);
if ((current != NULL)
&& assign_instance(instance, current, max_per_node)) {
pe_rsc_trace(collective, "Assigned %s to current node %s",
instance->id, pe__node_name(current));
assigned++;
}
}
pe_rsc_trace(collective, "Assigned %d of %d instance%s to current node",
assigned, max_total, pcmk__plural_s(max_total));
for (iter = instances; iter != NULL; iter = iter->next) {
instance = (pe_resource_t *) iter->data;
if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
continue; // Already assigned
}
if (instance->running_on != NULL) {
current = pe__current_node(instance);
if (pcmk__top_allowed_node(instance, current) == NULL) {
const char *unmanaged = "";
if (!pcmk_is_set(instance->flags, pe_rsc_managed)) {
unmanaged = "Unmanaged resource ";
}
crm_notice("%s%s is running on %s which is no longer allowed",
unmanaged, instance->id, pe__node_name(current));
}
}
if (assigned >= max_total) {
pe_rsc_debug(collective,
"Not assigning %s because maximum %d instances "
"already assigned",
instance->id, max_total);
resource_location(instance, NULL, -INFINITY,
"collective_limit_reached", collective->cluster);
} else if (assign_instance(instance, NULL, max_per_node)) {
assigned++;
}
}
pe_rsc_debug(collective, "Assigned %d of %d possible instance%s of %s",
assigned, max_total, pcmk__plural_s(max_total),
collective->id);
}
enum instance_state {
instance_starting = (1 << 0),
instance_stopping = (1 << 1),
/* This indicates that some instance is restarting. It's not the same as
* instance_starting|instance_stopping, which would indicate that some
* instance is starting, and some instance (not necessarily the same one) is
* stopping.
*/
instance_restarting = (1 << 2),
instance_active = (1 << 3),
instance_all = instance_starting|instance_stopping
|instance_restarting|instance_active,
};
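/* Illustrative note, not part of this patch: if instance A is stopping while a
 * different instance B is starting, the collective state includes
 * instance_starting|instance_stopping but not instance_restarting;
 * instance_restarting is set only when a single instance is both starting and
 * stopping (see check_instance_state() below).
 */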
/*!
* \internal
* \brief Check whether an instance is active, starting, and/or stopping
*
* \param[in] instance Clone instance or bundle replica container
* \param[in,out] state Whether any instance is starting, stopping, etc.
*/
static void
check_instance_state(const pe_resource_t *instance, uint32_t *state)
{
const GList *iter = NULL;
uint32_t instance_state = 0; // State of just this instance
// No need to check further if all conditions have already been detected
if (pcmk_all_flags_set(*state, instance_all)) {
return;
}
// If instance is a collective (a cloned group), check its children instead
if (instance->variant > pe_native) {
for (iter = instance->children;
(iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
iter = iter->next) {
check_instance_state((const pe_resource_t *) iter->data, state);
}
return;
}
// If we get here, instance is a primitive
if (instance->running_on != NULL) {
instance_state |= instance_active;
}
// Check each of the instance's actions for runnable start or stop
for (iter = instance->actions;
(iter != NULL) && !pcmk_all_flags_set(instance_state,
instance_starting
|instance_stopping);
iter = iter->next) {
const pe_action_t *action = (const pe_action_t *) iter->data;
const bool optional = pcmk_is_set(action->flags, pe_action_optional);
if (pcmk__str_eq(RSC_START, action->task, pcmk__str_none)) {
if (!optional && pcmk_is_set(action->flags, pe_action_runnable)) {
pe_rsc_trace(instance, "Instance is starting due to %s",
action->uuid);
instance_state |= instance_starting;
} else {
pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
action->uuid, instance->id,
(optional? "optional" : "unrunnable"));
}
} else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_none)) {
/* Only stop actions can be pseudo-actions for primitives. That
* indicates that the node they are on is being fenced, so the stop
* is implied rather than actually executed.
*/
if (!optional
&& pcmk_any_flags_set(action->flags,
pe_action_pseudo|pe_action_runnable)) {
pe_rsc_trace(instance, "Instance is stopping due to %s",
action->uuid);
instance_state |= instance_stopping;
} else {
pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
action->uuid, instance->id,
(optional? "optional" : "unrunnable"));
}
}
}
if (pcmk_all_flags_set(instance_state,
instance_starting|instance_stopping)) {
instance_state |= instance_restarting;
}
*state |= instance_state;
}
/*!
* \internal
* \brief Create actions for collective resource instances
*
* \param[in,out] collective Clone or bundle resource to create actions for
* \param[in,out] instances List of clone instances or bundle containers
*/
void
pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
{
uint32_t state = 0;
pe_action_t *stop = NULL;
pe_action_t *stopped = NULL;
pe_action_t *start = NULL;
pe_action_t *started = NULL;
pe_rsc_trace(collective, "Creating collective instance actions for %s",
collective->id);
// Create actions for each instance appropriate to its variant
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
instance->cmds->create_actions(instance);
check_instance_state(instance, &state);
}
// Create pseudo-actions for rsc start and started
start = pe__new_rsc_pseudo_action(collective, RSC_START,
!pcmk_is_set(state, instance_starting),
true);
started = pe__new_rsc_pseudo_action(collective, RSC_STARTED,
!pcmk_is_set(state, instance_starting),
false);
started->priority = INFINITY;
if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
pe__set_action_flags(started, pe_action_runnable);
}
// Create pseudo-actions for rsc stop and stopped
stop = pe__new_rsc_pseudo_action(collective, RSC_STOP,
!pcmk_is_set(state, instance_stopping),
true);
stopped = pe__new_rsc_pseudo_action(collective, RSC_STOPPED,
!pcmk_is_set(state, instance_stopping),
true);
stopped->priority = INFINITY;
if (!pcmk_is_set(state, instance_restarting)) {
pe__set_action_flags(stop, pe_action_migrate_runnable);
}
if (collective->variant == pe_clone) {
pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
stopped);
}
}
/*!
* \internal
* \brief Get a list of clone instances or bundle replica containers
*
* \param[in] rsc Clone or bundle resource
*
* \return Clone instances if \p rsc is a clone, or a newly created list of
* \p rsc's replica containers if \p rsc is a bundle
* \note The caller must call free_instance_list() on the result when the list
* is no longer needed.
*/
static inline GList *
get_instance_list(const pe_resource_t *rsc)
{
if (rsc->variant == pe_container) {
return pe__bundle_containers(rsc);
} else {
return rsc->children;
}
}
/*!
* \internal
* \brief Free any memory created by get_instance_list()
*
* \param[in] rsc Clone or bundle resource passed to get_instance_list()
* \param[in,out] list Return value of get_instance_list() for \p rsc
*/
static inline void
free_instance_list(const pe_resource_t *rsc, GList *list)
{
if (list != rsc->children) {
g_list_free(list);
}
}
/*!
* \internal
* \brief Check whether an instance is compatible with a role and node
*
* \param[in] instance Clone instance or bundle replica container
* \param[in] node Instance must match this node
* \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return true if \p instance is compatible with \p node and \p role,
* otherwise false
*/
bool
pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
enum rsc_role_e role, bool current)
{
pe_node_t *instance_node = NULL;
CRM_CHECK((instance != NULL) && (node != NULL), return false);
if ((role != RSC_ROLE_UNKNOWN)
&& (role != instance->fns->state(instance, current))) {
pe_rsc_trace(instance,
"%s is not a compatible instance (role is not %s)",
instance->id, role2text(role));
return false;
}
if (!is_set_recursive(instance, pe_rsc_block, true)) {
// We only want instances that haven't failed
instance_node = instance->fns->location(instance, NULL, current);
}
if (instance_node == NULL) {
pe_rsc_trace(instance,
"%s is not a compatible instance (not assigned to a node)",
instance->id);
return false;
}
if (instance_node->details != node->details) {
pe_rsc_trace(instance,
"%s is not a compatible instance (assigned to %s not %s)",
instance->id, pe__node_name(instance_node),
pe__node_name(node));
return false;
}
return true;
}
/*!
* \internal
* \brief Find an instance that matches a given resource by node and role
*
* \param[in] match_rsc Resource that instance must match (for logging only)
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] node Instance must match this node
* \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return \p rsc instance matching \p node and \p role if any, otherwise NULL
*/
static pe_resource_t *
find_compatible_instance_on_node(const pe_resource_t *match_rsc,
const pe_resource_t *rsc,
const pe_node_t *node, enum rsc_role_e role,
bool current)
{
GList *instances = NULL;
instances = get_instance_list(rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *instance = (pe_resource_t *) iter->data;
if (pcmk__instance_matches(instance, node, role, current)) {
pe_rsc_trace(match_rsc, "Found %s %s instance %s compatible with %s on %s",
role == RSC_ROLE_UNKNOWN? "matching" : role2text(role),
rsc->id, instance->id, match_rsc->id,
pe__node_name(node));
free_instance_list(rsc, instances); // Only frees list, not contents
return instance;
}
}
free_instance_list(rsc, instances);
pe_rsc_trace(match_rsc, "No %s %s instance found compatible with %s on %s",
((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
rsc->id, match_rsc->id, pe__node_name(node));
return NULL;
}
/*!
* \internal
* \brief Find a clone instance or bundle container compatible with a resource
*
* \param[in] match_rsc Resource that instance must match
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return Compatible (by \p role and \p match_rsc location) instance of \p rsc
* if any, otherwise NULL
*/
pe_resource_t *
pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
const pe_resource_t *rsc, enum rsc_role_e role,
bool current)
{
pe_resource_t *instance = NULL;
GList *nodes = NULL;
const pe_node_t *node = match_rsc->fns->location(match_rsc, NULL, current);
// If match_rsc has a node, check only that node
if (node != NULL) {
return find_compatible_instance_on_node(match_rsc, rsc, node, role,
current);
}
// Otherwise check for an instance matching any of match_rsc's allowed nodes
nodes = pcmk__sort_nodes(g_hash_table_get_values(match_rsc->allowed_nodes),
NULL);
for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
iter = iter->next) {
instance = find_compatible_instance_on_node(match_rsc, rsc,
(pe_node_t *) iter->data,
role, current);
}
if (instance == NULL) {
pe_rsc_debug(rsc, "No %s instance found compatible with %s",
rsc->id, match_rsc->id);
}
g_list_free(nodes);
return instance;
}
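/* Illustrative call sketch (mirrors the use in update_interleaved_actions()
 * below; variable names are assumed from that context): pair a "then" instance
 * with a compatible "first" instance, preferring the node it already occupies.
 */
#if 0
pe_resource_t *partner = pcmk__find_compatible_instance(then_instance,
                                                        first->rsc,
                                                        RSC_ROLE_UNKNOWN,
                                                        current);
if (partner == NULL) {
    // No instance can be interleaved; the caller decides how to handle that
}
#endif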
/*!
* \internal
* \brief Unassign an instance if mandatory ordering has no interleave match
*
* \param[in] first 'First' action in an ordering
* \param[in] then 'Then' action in an ordering
* \param[in,out] then_instance 'Then' instance that has no interleave match
* \param[in] type Group of enum pe_ordering flags to apply
* \param[in] current If true, "then" action is stopped or demoted
*
* \return true if \p then_instance was unassigned, otherwise false
*/
static bool
unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
pe_resource_t *then_instance, uint32_t type, bool current)
{
// Allow "then" instance to go down even without an interleave match
if (current) {
pe_rsc_trace(then->rsc,
"%s has no instance to order before stopping "
"or demoting %s",
first->rsc->id, then_instance->id);
/* If the "first" action must be runnable, but there is no "first"
* instance, the "then" instance must not be allowed to come up.
*/
} else if (pcmk_any_flags_set(type, pe_order_runnable_left
|pe_order_implies_then)) {
pe_rsc_info(then->rsc,
"Inhibiting %s from being active "
"because there is no %s instance to interleave",
then_instance->id, first->rsc->id);
return pcmk__assign_resource(then_instance, NULL, true);
}
return false;
}
/*!
* \internal
* \brief Find first matching action for a clone instance or bundle container
*
* \param[in] action Action in an interleaved ordering
* \param[in] instance Clone instance or bundle container being interleaved
* \param[in] action_name Action to look for
* \param[in] node If not NULL, require action to be on this node
* \param[in] for_first If true, \p instance is the 'first' resource in the
* ordering, otherwise it is the 'then' resource
*
* \return First action for \p instance (or in some cases if \p instance is a
* bundle container, its containerized resource) that matches
* \p action_name and \p node if any, otherwise NULL
*/
static pe_action_t *
find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
const char *action_name, const pe_node_t *node,
bool for_first)
{
const pe_resource_t *rsc = NULL;
pe_action_t *matching_action = NULL;
/* If instance is a bundle container, sometimes we should interleave the
* action for the container itself, and sometimes for the containerized
* resource.
*
* For example, given "start bundle A then bundle B", B likely requires the
* service inside A's container to be active, rather than just the
* container, so we should interleave the action for A's containerized
* resource. On the other hand, it's possible B's container itself requires
* something from A, so we should interleave the action for B's container.
*
* Essentially, for 'first', we should use the containerized resource for
* everything except stop, and for 'then', we should use the container for
* everything except promote and demote (which can only be performed on the
* containerized resource).
*/
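/* Illustrative mapping (restating the rule above, not new behavior): for
 * "start bundle-A then start bundle-B", the "first" action comes from A's
 * containerized resource (start is not a stop) and the "then" action from
 * B's container (start is not a promote or demote), whereas for
 * "stop bundle-B then stop bundle-A" both sides use the container's stop.
 */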
if ((for_first && !pcmk__str_any_of(action->task, CRMD_ACTION_STOP,
CRMD_ACTION_STOPPED, NULL))
|| (!for_first && pcmk__str_any_of(action->task, CRMD_ACTION_PROMOTE,
CRMD_ACTION_PROMOTED,
CRMD_ACTION_DEMOTE,
CRMD_ACTION_DEMOTED, NULL))) {
rsc = pcmk__get_rsc_in_container(instance);
}
if (rsc == NULL) {
rsc = instance; // No containerized resource, use instance itself
} else {
node = NULL; // Containerized actions are on bundle-created guest
}
matching_action = find_first_action(rsc->actions, NULL, action_name, node);
if (matching_action != NULL) {
return matching_action;
}
if (pcmk_is_set(instance->flags, pe_rsc_orphan)
|| pcmk__str_any_of(action_name, RSC_STOP, RSC_DEMOTE, NULL)) {
crm_trace("No %s action found for %s%s",
action_name,
pcmk_is_set(instance->flags, pe_rsc_orphan)? "orphan " : "",
instance->id);
} else {
crm_err("No %s action found for %s to interleave (bug?)",
action_name, instance->id);
}
return NULL;
}
/*!
* \internal
* \brief Get the original action name of a bundle or clone action
*
* Given an action for a bundle or clone, get the original action name,
* mapping notify to the action being notified, and if the instances are
* primitives, mapping completion actions to the action that was completed
* (for example, stopped to stop).
*
* \param[in] action Clone or bundle action to check
*
* \return Original action name for \p action
*/
static const char *
orig_action_name(const pe_action_t *action)
{
const pe_resource_t *instance = action->rsc->children->data; // Any instance
char *action_type = NULL;
const char *action_name = action->task;
enum action_tasks orig_task = no_action;
if (pcmk__strcase_any_of(action->task, CRMD_ACTION_NOTIFY,
CRMD_ACTION_NOTIFIED, NULL)) {
// action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
return task2text(no_action));
action_name = strstr(action_type, "_notify_");
CRM_CHECK(action_name != NULL, return task2text(no_action));
action_name += strlen("_notify_");
}
orig_task = get_complex_task(instance, action_name);
free(action_type);
return task2text(orig_task);
}
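/* Worked example (derived from the uuid format noted above, not new behavior):
 * for a clone notify action with uuid "myclone_confirmed-post_notify_start_0",
 * parse_op_key() yields "confirmed-post_notify_start", the text after
 * "_notify_" is "start", and get_complex_task() maps that back to the task the
 * primitive instances actually run.
 */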
/*!
* \internal
* \brief Update two interleaved actions according to an ordering between them
*
* Given information about an ordering of two interleaved actions, update the
* actions' flags (and runnable_before members if appropriate) as appropriate
* for the ordering. Effects may cascade to other orderings involving the
* actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] filter Action flags to limit scope of certain updates (may
* include pe_action_optional to affect only mandatory
* actions, and pe_action_runnable to affect only
* runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
update_interleaved_actions(pe_action_t *first, pe_action_t *then,
const pe_node_t *node, uint32_t filter,
uint32_t type)
{
GList *instances = NULL;
uint32_t changed = pcmk__updated_none;
const char *orig_first_task = orig_action_name(first);
// Stops and demotes must be interleaved with instance on current node
bool current = pcmk__ends_with(first->uuid, "_" CRMD_ACTION_STOPPED "_0")
|| pcmk__ends_with(first->uuid,
"_" CRMD_ACTION_DEMOTED "_0");
// Update the specified actions for each "then" instance individually
instances = get_instance_list(then->rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *first_instance = NULL;
pe_resource_t *then_instance = iter->data;
pe_action_t *first_action = NULL;
pe_action_t *then_action = NULL;
// Find a "first" instance to interleave with this "then" instance
first_instance = pcmk__find_compatible_instance(then_instance,
first->rsc,
RSC_ROLE_UNKNOWN,
current);
if (first_instance == NULL) { // No instance can be interleaved
if (unassign_if_mandatory(first, then, then_instance, type,
current)) {
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
continue;
}
first_action = find_instance_action(first, first_instance,
orig_first_task, node, true);
if (first_action == NULL) {
continue;
}
then_action = find_instance_action(then, then_instance, then->task,
node, false);
if (then_action == NULL) {
continue;
}
if (order_actions(first_action, then_action, type)) {
pcmk__set_updated_flags(changed, first,
pcmk__updated_first|pcmk__updated_then);
}
changed |= then_instance->cmds->update_ordered_actions(
first_action, then_action, node,
first_instance->cmds->action_flags(first_action, node), filter,
type, then->rsc->cluster);
}
free_instance_list(then->rsc, instances);
return changed;
}
/*!
* \internal
* \brief Check whether two actions in an ordering can be interleaved
*
* \param[in] first 'First' action in the ordering
* \param[in] then 'Then' action in the ordering
*
* \return true if \p first and \p then can be interleaved, otherwise false
*/
static bool
can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
{
bool interleave = false;
pe_resource_t *rsc = NULL;
if ((first->rsc == NULL) || (then->rsc == NULL)) {
crm_trace("Not interleaving %s with %s: not resource actions",
first->uuid, then->uuid);
return false;
}
if (first->rsc == then->rsc) {
crm_trace("Not interleaving %s with %s: same resource",
first->uuid, then->uuid);
return false;
}
if ((first->rsc->variant < pe_clone) || (then->rsc->variant < pe_clone)) {
crm_trace("Not interleaving %s with %s: not clones or bundles",
first->uuid, then->uuid);
return false;
}
if (pcmk__ends_with(then->uuid, "_stop_0")
|| pcmk__ends_with(then->uuid, "_demote_0")) {
rsc = first->rsc;
} else {
rsc = then->rsc;
}
interleave = crm_is_true(g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_INTERLEAVE));
pe_rsc_trace(rsc, "'%s then %s' will %sbe interleaved (based on %s)",
first->uuid, then->uuid, (interleave? "" : "not "), rsc->id);
return interleave;
}
/*!
* \internal
* \brief Update non-interleaved instance actions according to an ordering
*
* Given information about an ordering of two non-interleaved actions, update
* the actions' flags (and runnable_before members if appropriate) as
* appropriate for the ordering. Effects may cascade to other orderings
* involving the actions as well.
*
* \param[in,out] instance Clone instance or bundle container
* \param[in,out] first "First" action in ordering
* \param[in] then "Then" action in ordering (for \p instance's parent)
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pe_action_optional to affect only mandatory
* actions, and pe_action_runnable to affect only
* runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
const pe_action_t *then, const pe_node_t *node,
uint32_t flags, uint32_t filter, uint32_t type)
{
pe_action_t *instance_action = NULL;
uint32_t instance_flags = 0;
uint32_t changed = pcmk__updated_none;
// Check whether instance has an equivalent of "then" action
instance_action = find_first_action(instance->actions, NULL, then->task,
node);
if (instance_action == NULL) {
return changed;
}
// Check whether action is runnable
instance_flags = instance->cmds->action_flags(instance_action, node);
if (!pcmk_is_set(instance_flags, pe_action_runnable)) {
return changed;
}
// If so, update actions for the instance
changed = instance->cmds->update_ordered_actions(first, instance_action,
node, flags, filter, type,
instance->cluster);
// Propagate any changes to later actions
if (pcmk_is_set(changed, pcmk__updated_then)) {
for (GList *after_iter = instance_action->actions_after;
after_iter != NULL; after_iter = after_iter->next) {
pe_action_wrapper_t *after = after_iter->data;
pcmk__update_action_for_orderings(after->action, instance->cluster);
}
}
return changed;
}
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two clone or bundle actions, update
* the actions' flags (and runnable_before members if appropriate) as
* appropriate for the ordering. Effects may cascade to other orderings
* involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pe_action_optional to affect only mandatory
* actions, and pe_action_runnable to affect only
* runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
* \param[in,out] data_set Cluster working set
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
const pe_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pe_working_set_t *data_set)
{
if (then->rsc == NULL) {
return pcmk__updated_none;
} else if (can_interleave_actions(first, then)) {
return update_interleaved_actions(first, then, node, filter, type);
} else {
uint32_t changed = pcmk__updated_none;
GList *instances = get_instance_list(then->rsc);
// Update actions for the clone or bundle resource itself
changed |= pcmk__update_ordered_actions(first, then, node, flags,
filter, type, data_set);
// Update the 'then' clone instances or bundle containers individually
for (GList *iter = instances; iter != NULL; iter = iter->next) {
pe_resource_t *instance = iter->data;
changed |= update_noninterleaved_actions(instance, first, then,
node, flags, filter, type);
}
free_instance_list(then->rsc, instances);
return changed;
}
}
#define pe__clear_action_summary_flags(flags, action, flag) do { \
flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Action summary", action->rsc->id, \
flags, flag, #flag); \
} while (0)
/*!
* \internal
* \brief Return action flags for a given clone or bundle action
*
* \param[in,out] action Action for a clone or bundle
* \param[in] instances Clone instances or bundle containers
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
enum pe_action_flags
pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
const pe_node_t *node)
{
bool any_runnable = false;
enum pe_action_flags flags;
const char *action_name = orig_action_name(action);
// Set original assumptions (optional and runnable may be cleared below)
flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
for (const GList *iter = instances; iter != NULL; iter = iter->next) {
const pe_resource_t *instance = iter->data;
const pe_node_t *instance_node = NULL;
pe_action_t *instance_action = NULL;
enum pe_action_flags instance_flags;
// Node is relevant only to primitive instances
if (instance->variant == pe_native) {
instance_node = node;
}
instance_action = find_first_action(instance->actions, NULL,
action_name, instance_node);
if (instance_action == NULL) {
pe_rsc_trace(action->rsc, "%s has no %s action on %s",
instance->id, action_name, pe__node_name(node));
continue;
}
pe_rsc_trace(action->rsc, "%s has %s for %s on %s",
instance->id, instance_action->uuid, action_name,
pe__node_name(node));
instance_flags = instance->cmds->action_flags(instance_action, node);
// If any instance action is mandatory, so is the collective action
if (pcmk_is_set(flags, pe_action_optional)
&& !pcmk_is_set(instance_flags, pe_action_optional)) {
pe_rsc_trace(instance, "%s is mandatory because %s is",
action->uuid, instance_action->uuid);
pe__clear_action_summary_flags(flags, action, pe_action_optional);
pe__clear_action_flags(action, pe_action_optional);
}
// If any instance action is runnable, so is the collective action
if (pcmk_is_set(instance_flags, pe_action_runnable)) {
any_runnable = true;
}
}
if (!any_runnable) {
pe_rsc_trace(action->rsc,
"%s is not runnable because no instance can run %s",
action->uuid, action_name);
pe__clear_action_summary_flags(flags, action, pe_action_runnable);
if (node == NULL) {
pe__clear_action_flags(action, pe_action_runnable);
}
}
return flags;
}
/*!
* \internal
* \brief Add a collective resource's colocations to a list for an instance
*
* \param[in,out] list Colocation list to add to
* \param[in] instance Clone or bundle instance or instance group member
* \param[in] collective Clone or bundle resource with colocations to add
* \param[in] with_this If true, add collective's "with this" colocations,
* otherwise add its "this with" colocations
*/
void
pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance,
const pe_resource_t *collective,
bool with_this)
{
const GList *colocations = NULL;
bool everywhere = false;
CRM_CHECK((list != NULL) && (instance != NULL), return);
if (collective == NULL) {
return;
}
switch (collective->variant) {
case pe_clone:
case pe_container:
break;
default:
return;
}
everywhere = can_run_everywhere(collective);
if (with_this) {
colocations = collective->rsc_cons_lhs;
} else {
colocations = collective->rsc_cons;
}
for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *colocation = iter->data;
if (with_this
&& !pcmk__colocation_has_influence(colocation, instance)) {
continue;
}
if (!everywhere || (colocation->score < 0)
|| (!with_this && (colocation->score == INFINITY))) {
if (with_this) {
pcmk__add_with_this(list, colocation);
} else {
pcmk__add_this_with(list, colocation);
}
}
}
}
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 9b9bd71e6c..1c31b1bd40 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1171 +1,1149 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean);
resource_object_functions_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
native_parameter,
native_print,
native_active,
native_resource_state,
native_location,
native_free,
pe__count_common,
pe__native_is_filtered,
active_node,
},
{
group_unpack,
native_find_rsc,
native_parameter,
group_print,
group_active,
group_resource_state,
native_location,
group_free,
pe__count_common,
pe__group_is_filtered,
active_node,
},
{
clone_unpack,
native_find_rsc,
native_parameter,
clone_print,
clone_active,
clone_resource_state,
native_location,
clone_free,
pe__count_common,
pe__clone_is_filtered,
active_node,
},
{
pe__unpack_bundle,
native_find_rsc,
native_parameter,
pe__print_bundle,
pe__bundle_active,
pe__bundle_resource_state,
native_location,
pe__free_bundle,
pe__count_bundle,
pe__bundle_is_filtered,
pe__bundle_active_node,
}
};
static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
return pe_native;
} else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
return pe_group;
} else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
return pe_clone;
} else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
return pe_clone;
} else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
return pe_container;
}
return pe_unknown;
}
static void
dup_attr(gpointer key, gpointer value, gpointer user_data)
{
add_hash_param(user_data, key, value);
}
static void
expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
pe_resource_t *p = rsc->parent;
if (p == NULL) {
return ;
}
/* Walk up through all ancestor resources, collect the meta_attributes values set directly in their original XML, and stack them in the hash table. */
/* Values from closer ancestors take precedence and are not overwritten. */
while(p != NULL) {
/* A hash table for comparison is generated, including the id-ref. */
pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
rule_data, parent_orig_meta, NULL, FALSE, data_set);
p = p->parent;
}
/* If any ancestor set meta_attributes directly in its XML, merge those values in. */
if (parent_orig_meta != NULL) {
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, parent_orig_meta);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
/* Meta-attributes set in an ancestor's original XML are also applied to the child resource. */
/* Attributes that already exist in the child resource are not updated. */
dup_attr(key, value, meta_hash);
}
}
if (parent_orig_meta != NULL) {
g_hash_table_destroy(parent_orig_meta);
}
return ;
}
void
get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
const char *prop_name = (const char *) a->name;
const char *prop_value = crm_element_value(rsc->xml, prop_name);
add_hash_param(meta_hash, prop_name, prop_value);
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* Set the "meta_attributes" explicitly set in the parent resource to the hash table of the child resource. */
/* If it is already explicitly set as a child, it will not be overwritten. */
if (rsc->parent != NULL) {
expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
}
/* check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
/* If there is "meta_attributes" that the parent resource has not explicitly set, set a value that is not set from rsc_default either. */
/* The values already set up to this point will not be overwritten. */
if (rsc->parent) {
g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
}
}
void
get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
const pe_node_t *node, pe_working_set_t *data_set)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
}
}
static char *
template_op_key(xmlNode * op)
{
const char *name = crm_element_value(op, "name");
const char *role = crm_element_value(op, "role");
char *key = NULL;
if ((role == NULL)
|| pcmk__strcase_any_of(role, RSC_ROLE_STARTED_S, RSC_ROLE_UNPROMOTED_S,
RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
role = RSC_ROLE_UNKNOWN_S;
}
key = crm_strdup_printf("%s-%s", name, role);
return key;
}
static gboolean
unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
xmlNode *new_xml = NULL;
xmlNode *child_xml = NULL;
xmlNode *rsc_ops = NULL;
xmlNode *template_ops = NULL;
const char *template_ref = NULL;
const char *clone = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for template unpacking");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", crm_element_name(xml_obj));
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
}
template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
XML_ATTR_ID, template_ref);
if (template == NULL) {
pe_err("No template named '%s'", template_ref);
return FALSE;
}
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
crm_xml_replace(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
}
template_ops = find_xml_node(new_xml, "operations", FALSE);
for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
child_xml = pcmk__xe_next(child_xml)) {
xmlNode *new_child = NULL;
new_child = add_node_copy(new_xml, child_xml);
if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
rsc_ops = new_child;
}
}
if (template_ops && rsc_ops) {
xmlNode *op = NULL;
GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
g_hash_table_insert(rsc_ops_hash, key, op);
}
for (op = pcmk__xe_first_child(template_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
add_node_copy(rsc_ops, op);
}
free(key);
}
if (rsc_ops_hash) {
g_hash_table_destroy(rsc_ops_hash);
}
free_xml(template_ops);
}
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
/* Disable multi-level templates for now */
/*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return FALSE;
} */
return TRUE;
}
static gboolean
add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
{
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for processing resource list of template");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", crm_element_name(xml_obj));
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
return TRUE;
}
static bool
detect_promotable(pe_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
if (crm_is_true(promotable)) {
return TRUE;
}
// @COMPAT deprecated since 2.0.0
if (pcmk__str_eq(crm_element_name(rsc->xml), PCMK_XE_PROMOTABLE_LEGACY,
pcmk__str_casei)) {
/* @TODO in some future version, pe_warn_once() here,
* then drop support in even later version
*/
g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
strdup(XML_BOOLEAN_TRUE));
return TRUE;
}
return FALSE;
}
static void
free_params_table(gpointer data)
{
g_hash_table_destroy((GHashTable *) data);
}
/*!
* \brief Get a table of resource parameters
*
* \param[in,out] rsc Resource to query
* \param[in] node Node for evaluating rules (NULL for defaults)
* \param[in,out] data_set Cluster working set
*
* \return Hash table containing resource parameter names and values
* (or NULL if \p rsc or \p data_set is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
pe_working_set_t *data_set)
{
GHashTable *params_on_node = NULL;
/* A NULL node is used to request the resource's default parameters
* (not evaluated for node), but we always want something non-NULL
* as a hash table key.
*/
const char *node_name = "";
// Sanity check
if ((rsc == NULL) || (data_set == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
node_name = node->details->uname;
}
// Find the parameter table for given node
if (rsc->parameter_cache == NULL) {
rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
} else {
params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
}
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
get_rsc_attributes(params_on_node, rsc, node, data_set);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
return params_on_node;
}
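/* A short usage sketch (illustrative; the parameter name and surrounding
 * variables are assumed): look up one agent parameter as evaluated for a
 * particular node. The returned table is cached on the resource and must not
 * be destroyed by the caller.
 */
#if 0
GHashTable *params = pe_rsc_params(rsc, node, data_set);
const char *ip = g_hash_table_lookup(params, "ip"); // e.g. an IPaddr2 parameter
#endif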
/*!
* \internal
* \brief Unpack a resource's "requires" meta-attribute
*
* \param[in,out] rsc Resource being unpacked
* \param[in] value Value of "requires" meta-attribute
* \param[in] is_default Whether \p value was selected by default
*/
static void
unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
{
if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"is disabled", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else {
pe__set_resource_flags(rsc,
pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
value = PCMK__VALUE_QUORUM;
} else if ((rsc->variant == pe_native)
&& xml_contains_remote_node(rsc->xml)) {
value = PCMK__VALUE_QUORUM;
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
value = PCMK__VALUE_UNFENCING;
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
value = PCMK__VALUE_FENCING;
} else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
value = PCMK__VALUE_NOTHING;
} else {
value = PCMK__VALUE_QUORUM;
}
if (orig_value != NULL) {
pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
"to '%s' because '%s' is not valid",
rsc->id, value, orig_value);
}
unpack_requires(rsc, value, true);
return;
}
pe_rsc_trace(rsc, "\tRequired to start: %s%s", value,
(is_default? " (default)" : ""));
}
/*!
* \internal
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
* \c pe_resource_t object.
*
* \param[in] xml_obj XML node containing the resource's configuration
* \param[out] rsc Where to store the unpacked resource information
* \param[in] parent Resource's parent, if any
* \param[in,out] data_set Cluster working set
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
* the caller is responsible for freeing it using its variant-specific
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_resource_t *parent, pe_working_set_t *data_set)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
const char *value = NULL;
const char *id = NULL;
bool guest_node = false;
bool remote_node = false;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
CRM_CHECK(rsc != NULL, return EINVAL);
CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
*rsc = NULL;
return EINVAL);
rule_data.now = data_set->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
crm_element_name(xml_obj));
return pcmk_rc_unpack_error;
}
if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
return pcmk_rc_unpack_error;
}
*rsc = calloc(1, sizeof(pe_resource_t));
if (*rsc == NULL) {
crm_crit("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
(*rsc)->cluster = data_set;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
(*rsc)->xml = expanded_xml;
(*rsc)->orig_xml = xml_obj;
} else {
(*rsc)->xml = xml_obj;
(*rsc)->orig_xml = NULL;
}
/* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
(*rsc)->ops_xml = expand_idref(ops, data_set->input);
(*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
if ((*rsc)->variant == pe_unknown) {
pe_err("Ignoring resource '%s' of unknown type '%s'",
id, crm_element_name((*rsc)->xml));
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
(*rsc)->meta = pcmk__strkey_table(free, free);
(*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
(*rsc)->known_on = pcmk__strkey_table(NULL, free);
value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
if (value) {
(*rsc)->id = crm_strdup_printf("%s:%s", id, value);
add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
} else {
(*rsc)->id = strdup(id);
}
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
(*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
(*rsc)->flags = 0;
pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe__set_resource_flags(*rsc, pe_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
(*rsc)->role = RSC_ROLE_STOPPED;
(*rsc)->next_role = RSC_ROLE_UNKNOWN;
(*rsc)->recovery_type = recovery_stop_start;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
(*rsc)->priority = char2score(value);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
(*rsc)->is_remote_node = TRUE;
if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
guest_node = true;
} else {
remote_node = true;
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
} else if ((value == NULL) && remote_node) {
/* By default, we want remote nodes to be able
* to float around the cluster without having to stop all the
* resources within the remote-node before moving. Allowing
* migration support enables this feature. If this ever causes
* problems, migration support can be explicitly turned off with
* allow-migrate=false.
*/
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_managed);
} else {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_unique);
}
if (detect_promotable(*rsc)) {
pe__set_resource_flags(*rsc, pe_rsc_promotable);
}
} else {
pe__set_resource_flags(*rsc, pe_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
(*rsc)->id);
pe_warn_once(pe_wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
(*rsc)->restart_type = pe_restart_ignore;
pe_rsc_trace((*rsc), "%s dependency restart handling: ignore",
(*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
(*rsc)->recovery_type = recovery_stop_only;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
(*rsc)->recovery_type = recovery_block;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
(*rsc)->recovery_type = recovery_stop_unexpected;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
} else { // "stop_start"
if (!pcmk__str_eq(value, "stop_start",
pcmk__str_casei|pcmk__str_null_matches)) {
pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
", using default of \"stop_start\"", value);
}
(*rsc)->recovery_type = recovery_stop_start;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop/start", (*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->stickiness = char2score(value);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
/* @TODO We use 1 here to preserve previous behavior, but this
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
pe_warn_once(pe_wo_neg_threshold,
XML_RSC_ATTR_FAIL_STICKINESS
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
}
if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
pe__set_resource_flags(*rsc, pe_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
unpack_requires(*rsc, value, false);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
* reconnect_interval.
*
* @TODO Evaluate per node before using
*/
value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
if (value) {
/* reconnect delay works by setting failure_timeout and preventing the
* connection from starting until the failure is cleared. */
(*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
/* we want to override any default failure_timeout in use when remote
* reconnect_interval is in use. */
(*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
}
}
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
(*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
} else if (guest_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
}
pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
(*rsc)->utilization, NULL, FALSE, data_set);
if (expanded_xml) {
if (add_template_rsc(xml_obj, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
}
return pcmk_rc_ok;
}
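/* Illustrative caller sketch (not part of this diff): per the contract above,
 * on pcmk_rc_ok the caller owns the resource and frees it with the
 * variant-specific method; on any other return code *rsc is guaranteed NULL.
 */
#if 0
pe_resource_t *new_rsc = NULL;

if (pe__unpack_resource(xml_obj, &new_rsc, NULL, data_set) == pcmk_rc_ok) {
    // ... use new_rsc ...
    new_rsc->fns->free(new_rsc);
}
#endif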
-void
-common_update_score(pe_resource_t * rsc, const char *id, int score)
-{
- pe_node_t *node = NULL;
-
- node = pe_hash_table_lookup(rsc->allowed_nodes, id);
- if (node != NULL) {
- pe_rsc_trace(rsc, "Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score);
- node->weight = pcmk__add_scores(node->weight, score);
- }
-
- if (rsc->children) {
- GList *gIter = rsc->children;
-
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
-
- common_update_score(child_rsc, id, score);
- }
- }
-}
-
gboolean
is_parent(pe_resource_t *child, pe_resource_t *rsc)
{
pe_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
}
while (parent->parent != NULL) {
if (parent->parent == rsc) {
return TRUE;
}
parent = parent->parent;
}
return FALSE;
}
pe_resource_t *
uber_parent(pe_resource_t * rsc)
{
pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL && parent->parent->variant != pe_container) {
parent = parent->parent;
}
return parent;
}
/*!
* \internal
* \brief Get the topmost parent of a resource as a const pointer
*
* \param[in] rsc Resource to check
* \param[in] include_bundle If true, go all the way to bundle
*
* \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
const pe_resource_t *
pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
{
const pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL) {
if (!include_bundle && (parent->parent->variant == pe_container)) {
break;
}
parent = parent->parent;
}
return parent;
}
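/* Illustrative comparison (not new behavior): for a primitive inside a bundle,
 * uber_parent() stops just below the bundle, while
 * pe__const_top_resource(rsc, true) continues up to the bundle itself.
 */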
void
common_free(pe_resource_t * rsc)
{
if (rsc == NULL) {
return;
}
pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
g_list_free(rsc->rsc_cons);
g_list_free(rsc->rsc_cons_lhs);
g_list_free(rsc->rsc_tickets);
g_list_free(rsc->dangling_migrations);
if (rsc->parameter_cache != NULL) {
g_hash_table_destroy(rsc->parameter_cache);
}
if (rsc->meta != NULL) {
g_hash_table_destroy(rsc->meta);
}
if (rsc->utilization != NULL) {
g_hash_table_destroy(rsc->utilization);
}
if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
rsc->orig_xml = NULL;
/* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
} else if (rsc->orig_xml) {
free_xml(rsc->xml);
rsc->xml = NULL;
}
if (rsc->running_on) {
g_list_free(rsc->running_on);
rsc->running_on = NULL;
}
if (rsc->known_on) {
g_hash_table_destroy(rsc->known_on);
rsc->known_on = NULL;
}
if (rsc->actions) {
g_list_free(rsc->actions);
rsc->actions = NULL;
}
if (rsc->allowed_nodes) {
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = NULL;
}
g_list_free(rsc->fillers);
g_list_free(rsc->rsc_location);
pe_rsc_trace(rsc, "Resource freed");
free(rsc->id);
free(rsc->clone_name);
free(rsc->allocated_to);
free(rsc->variant_opaque);
free(rsc->pending_task);
free(rsc);
}
/*!
* \internal
* \brief Count a node and update most preferred to it as appropriate
*
* \param[in] rsc An active resource
* \param[in] node A node that \p rsc is active on
* \param[in,out] active This will be set to \p node if \p node is more
* preferred than the current value
* \param[in,out] count_all If not NULL, this will be incremented
* \param[in,out] count_clean If not NULL, this will be incremented if \p node
* is online and clean
*
* \return true if the count should continue, or false if sufficiently known
*/
bool
pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
pe_node_t **active, unsigned int *count_all,
unsigned int *count_clean)
{
bool keep_looking = false;
bool is_happy = false;
CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
return false);
is_happy = node->details->online && !node->details->unclean;
if (count_all != NULL) {
++*count_all;
}
if ((count_clean != NULL) && is_happy) {
++*count_clean;
}
if ((count_all != NULL) || (count_clean != NULL)) {
keep_looking = true; // We're counting, so go through entire list
}
if (rsc->partial_migration_source != NULL) {
if (node->details == rsc->partial_migration_source->details) {
*active = node; // This is the migration source
} else {
keep_looking = true;
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
if (is_happy && ((*active == NULL) || !(*active)->details->online
|| (*active)->details->unclean)) {
*active = node; // This is the first clean node
} else {
keep_looking = true;
}
}
if (*active == NULL) {
*active = node; // This is the first node checked
}
return keep_looking;
}
// Shared implementation of resource_object_functions_t:active_node()
static pe_node_t *
active_node(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
pe_node_t *active = NULL;
if (count_all != NULL) {
*count_all = 0;
}
if (count_clean != NULL) {
*count_clean = 0;
}
if (rsc == NULL) {
return NULL;
}
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
count_all, count_clean)) {
break; // Don't waste time iterating if we don't have to
}
}
return active;
}
/*!
* \internal
* \brief Find and count active nodes according to "requires"
*
* \param[in] rsc Resource to check
* \param[out] count If not NULL, will be set to count of active nodes
*
* \return An active node (or NULL if resource is not active anywhere)
*
* \note This is a convenience wrapper for active_node() where the count of all
* active nodes or only clean active nodes is desired according to the
* "requires" meta-attribute.
*/
pe_node_t *
pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
{
if (rsc == NULL) {
if (count != NULL) {
*count = 0;
}
return NULL;
} else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
return rsc->fns->active_node(rsc, count, NULL);
} else {
return rsc->fns->active_node(rsc, NULL, count);
}
}
void
pe__count_common(pe_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
((pe_resource_t *) item->data)->fns->count(item->data);
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role > RSC_ROLE_STOPPED)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
rsc->cluster->blocked_resources++;
}
}
}
/*!
* \internal
* \brief Update a resource's next role
*
* \param[in,out] rsc Resource to be updated
* \param[in] role Resource's new next role
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
rsc->id, role2text(rsc->next_role), role2text(role), why);
rsc->next_role = role;
}
}