diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index c389b3fb90..0a7c1a4c6f 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,712 +1,709 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_INTERNAL__H
# define PE_INTERNAL__H
# include <stdint.h>
# include <string.h>
# include <crm/pengine/status.h>
# include <crm/pengine/remote_internal.h>
# include <crm/common/internal.h>
# include <crm/common/options_internal.h>
# include <crm/common/output_internal.h>
const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts);
enum pe__clone_flags {
// Whether instances should be started sequentially
pe__clone_ordered = (1 << 0),
// Whether promotion scores have been added
pe__clone_promotion_added = (1 << 1),
// Whether promotion constraints have been added
pe__clone_promotion_constrained = (1 << 2),
};
bool pe__clone_is_ordered(const pe_resource_t *clone);
int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag);
enum pe__group_flags {
pe__group_ordered = (1 << 0), // Members start sequentially
pe__group_colocated = (1 << 1), // Members must be on same node
};
bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags);
pe_resource_t *pe__last_group_member(const pe_resource_t *group);
# define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
# define pe_err(fmt...) do { \
was_processing_error = TRUE; \
pcmk__config_err(fmt); \
} while (0)
# define pe_warn(fmt...) do { \
was_processing_warning = TRUE; \
pcmk__config_warn(fmt); \
} while (0)
# define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
# define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
#define pe__set_working_set_flags(working_set, flags_to_set) do { \
(working_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_working_set_flags(working_set, flags_to_clear) do { \
(working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", crm_system_name, \
(working_set)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_resource_flags(resource, flags_to_set) do { \
(resource)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_resource_flags(resource, flags_to_clear) do { \
(resource)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Resource", (resource)->id, (resource)->flags, \
(flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_action_flags(action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags(action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
action_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Action", action_name, \
(action_flags), \
(flags_to_set), #flags_to_set); \
} while (0)
#define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
action_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Action", action_name, \
(action_flags), \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_action_flags_as(function, line, action, flags_to_set) do { \
(action)->flags = pcmk__set_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
(action)->flags = pcmk__clear_flags_as((function), (line), \
LOG_TRACE, \
"Action", (action)->uuid, \
(action)->flags, \
(flags_to_clear), \
#flags_to_clear); \
} while (0)
#define pe__set_order_flags(order_flags, flags_to_set) do { \
order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_set), \
#flags_to_set); \
} while (0)
#define pe__clear_order_flags(order_flags, flags_to_clear) do { \
order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Ordering", "constraint", \
order_flags, (flags_to_clear), \
#flags_to_clear); \
} while (0)
// Some warnings we don't want to print every transition
enum pe_warn_once_e {
pe_wo_blind = (1 << 0),
pe_wo_restart_type = (1 << 1),
pe_wo_role_after = (1 << 2),
pe_wo_poweroff = (1 << 3),
pe_wo_require_all = (1 << 4),
pe_wo_order_score = (1 << 5),
pe_wo_neg_threshold = (1 << 6),
pe_wo_remove_after = (1 << 7),
pe_wo_ping_node = (1 << 8),
pe_wo_order_inst = (1 << 9),
pe_wo_coloc_inst = (1 << 10),
pe_wo_group_order = (1 << 11),
pe_wo_group_coloc = (1 << 12),
};
extern uint32_t pe_wo;
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (!pcmk_is_set(pe_wo, pe_wo_bit)) { \
if (pe_wo_bit == pe_wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Warn-once", "logging", pe_wo, \
(pe_wo_bit), #pe_wo_bit); \
} \
} while (0);
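/* Illustrative sketch only (not part of this patch): the function name and
 * message text below are invented. It shows the intended use of the
 * pe_warn_once() macro above: warn at most once per scheduler run, keyed by
 * the given pe_wo_* bit.
 */
static inline void
pe__warn_once_example(void)
{
    pe_warn_once(pe_wo_restart_type,
                 "Support for restart-type is deprecated and will be removed");
}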
typedef struct pe__location_constraint_s {
char *id; // Constraint XML ID
pe_resource_t *rsc_lh; // Resource being located
enum rsc_role_e role_filter; // Role to locate
enum pe_discover_e discover_mode; // Resource discovery
GList *node_list_rh; // List of pe_node_t*
} pe__location_t;
typedef struct pe__order_constraint_s {
int id;
uint32_t flags; // Group of enum pe_ordering flags
void *lh_opaque;
pe_resource_t *lh_rsc;
pe_action_t *lh_action;
char *lh_action_task;
void *rh_opaque;
pe_resource_t *rh_rsc;
pe_action_t *rh_action;
char *rh_action_task;
} pe__ordering_t;
typedef struct notify_data_s {
GSList *keys; // Environment variable name/value pairs
const char *action;
pe_action_t *pre;
pe_action_t *post;
pe_action_t *pre_done;
pe_action_t *post_done;
GList *active; /* notify_entry_t* */
GList *inactive; /* notify_entry_t* */
GList *start; /* notify_entry_t* */
GList *stop; /* notify_entry_t* */
GList *demote; /* notify_entry_t* */
GList *promote; /* notify_entry_t* */
GList *promoted; /* notify_entry_t* */
GList *unpromoted; /* notify_entry_t* */
GHashTable *allowed_nodes;
} notify_data_t;
const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc,
bool include_bundle);
int pe__clone_max(const pe_resource_t *clone);
int pe__clone_promoted_max(const pe_resource_t *clone);
int pe__clone_promoted_node_max(const pe_resource_t *clone);
pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
bool optional, bool runnable);
void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
bool any_demoting);
bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
void add_hash_param(GHashTable * hash, const char *name, const char *value);
char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
pe_working_set_t * data_set);
pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
void pe_metadata(pcmk__output_t *out);
void verify_pe_options(GHashTable * options);
void common_update_score(pe_resource_t * rsc, const char *id, int score);
void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed);
gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
int flags);
gboolean native_active(pe_resource_t * rsc, gboolean all);
gboolean group_active(pe_resource_t * rsc, gboolean all);
gboolean clone_active(pe_resource_t * rsc, gboolean all);
gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
//! \deprecated This function will be removed in a future release
void native_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void group_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void clone_print(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
void *print_data);
gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
const pe_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name,
size_t pairs_count, ...);
char *pe__node_display_name(pe_node_t *node, bool print_detail);
// Clone notifications (pe_notif.c)
void pe__create_notifications(pe_resource_t *rsc, notify_data_t *n_data);
notify_data_t *pe__clone_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
pe_action_t *action,
pe_action_t *complete);
void pe__free_notification_data(notify_data_t *n_data);
void pe__order_notifs_after_fencing(const pe_action_t *action,
pe_resource_t *rsc,
pe_action_t *stonith_op);
static inline const char *
pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag)
{
return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
}
int pe__clone_xml(pcmk__output_t *out, va_list args);
int pe__clone_default(pcmk__output_t *out, va_list args);
int pe__group_xml(pcmk__output_t *out, va_list args);
int pe__group_default(pcmk__output_t *out, va_list args);
int pe__bundle_xml(pcmk__output_t *out, va_list args);
int pe__bundle_html(pcmk__output_t *out, va_list args);
int pe__bundle_text(pcmk__output_t *out, va_list args);
int pe__node_html(pcmk__output_t *out, va_list args);
int pe__node_text(pcmk__output_t *out, va_list args);
int pe__node_xml(pcmk__output_t *out, va_list args);
int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
void native_free(pe_resource_t * rsc);
void group_free(pe_resource_t * rsc);
void clone_free(pe_resource_t * rsc);
void pe__free_bundle(pe_resource_t *rsc);
enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
gboolean current);
void pe__count_common(pe_resource_t *rsc);
void pe__count_bundle(pe_resource_t *rsc);
void common_free(pe_resource_t * rsc);
pe_node_t *pe__copy_node(const pe_node_t *this_node);
extern time_t get_effective_time(pe_working_set_t * data_set);
/* Failure handling utilities (from failcounts.c) */
// bit flags for fail count handling options
enum pe_fc_flags_e {
pe_fc_default = (1 << 0),
pe_fc_effective = (1 << 1), // don't count expired failures
pe_fc_fillers = (1 << 2), // if container, include filler failures in count
};
int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
time_t *last_failure, uint32_t flags,
const xmlNode *xml_op);
pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
const char *reason,
pe_working_set_t *data_set);
/* Functions for finding/counting a resource's active nodes */
-pe_node_t *pe__find_active_on(const pe_resource_t *rsc,
- unsigned int *count_all,
- unsigned int *count_clean);
pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
unsigned int *count);
static inline pe_node_t *
pe__current_node(const pe_resource_t *rsc)
{
- return pe__find_active_on(rsc, NULL, NULL);
+ return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL);
}
/* Binary like operators for lists of nodes */
extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores);
GHashTable *pe__node_list2table(const GList *list);
static inline gpointer
pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
{
if (hash) {
return g_hash_table_lookup(hash, key);
}
return NULL;
}
extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
void pe__show_node_weights_as(const char *file, const char *function,
int line, bool to_log, const pe_resource_t *rsc,
const char *comment, GHashTable *nodes,
pe_working_set_t *data_set);
#define pe__show_node_weights(level, rsc, text, nodes, data_set) \
pe__show_node_weights_as(__FILE__, __func__, __LINE__, \
(level), (rsc), (text), (nodes), (data_set))
xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key);
pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task,
const pe_node_t *on_node, gboolean optional,
gboolean foo, pe_working_set_t *data_set);
# define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
# define delete_action(rsc, node, optional) custom_action( \
rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \
optional, TRUE, rsc->cluster);
# define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
# define stopped_action(rsc, node, optional) custom_action( \
rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \
optional, TRUE, rsc->cluster);
# define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
# define stop_action(rsc, node, optional) custom_action( \
rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \
optional, TRUE, rsc->cluster);
# define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0)
# define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
# define start_action(rsc, node, optional) custom_action( \
rsc, start_key(rsc), CRMD_ACTION_START, node, \
optional, TRUE, rsc->cluster)
# define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
# define started_action(rsc, node, optional) custom_action( \
rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \
optional, TRUE, rsc->cluster)
# define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
# define promote_action(rsc, node, optional) custom_action( \
rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \
optional, TRUE, rsc->cluster)
# define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
# define promoted_action(rsc, node, optional) custom_action( \
rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \
optional, TRUE, rsc->cluster)
# define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
# define demote_action(rsc, node, optional) custom_action( \
rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \
optional, TRUE, rsc->cluster)
# define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
# define demoted_action(rsc, node, optional) custom_action( \
rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \
optional, TRUE, rsc->cluster)
extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
pe_working_set_t *data_set);
pe_action_t *find_first_action(const GList *input, const char *uuid,
const char *task, const pe_node_t *on_node);
enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name);
extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
const pe_node_t *on_node);
GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node);
extern void pe_free_action(pe_action_t * action);
void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
const char *tag, pe_working_set_t *data_set);
extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role);
void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
const char *why);
pe_resource_t *find_clone_instance(const pe_resource_t *rsc,
const char *sub_id);
extern void destroy_ticket(gpointer data);
extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
// Resources for manipulating resource names
const char *pe_base_name_end(const char *id);
char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
pe_base_name_eq(const pe_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
}
return false;
}
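/* Illustrative sketch only (not part of this patch): how the clone-name
 * helpers above fit together. The function name and the ID "galera:2" are
 * invented; the returned strings are allocated and must be freed, which
 * requires <stdlib.h>.
 */
static inline void
pe__clone_name_example(const pe_resource_t *rsc)
{
    char *base = clone_strip("galera:2");   // yields "galera"
    char *zero = clone_zero("galera:2");    // yields "galera:0"

    if (pe_base_name_eq(rsc, "galera")) {
        crm_trace("%s is an instance of galera (base %s, zeroth instance %s)",
                  rsc->id, base, zero);
    }
    free(base);
    free(zero);
}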
int pe__target_rc_from_xml(const xmlNode *xml_op);
gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any);
enum rsc_digest_cmp_val {
/*! Digests are the same */
RSC_DIGEST_MATCH = 0,
/*! Params that require a restart changed */
RSC_DIGEST_RESTART,
/*! Some parameter changed. */
RSC_DIGEST_ALL,
/*! rsc op didn't have a digest associated with it, so
* it is unknown if parameters changed or not. */
RSC_DIGEST_UNKNOWN,
};
typedef struct op_digest_cache_s {
enum rsc_digest_cmp_val rc;
xmlNode *params_all;
xmlNode *params_secure;
xmlNode *params_restart;
char *digest_all_calc;
char *digest_secure_calc;
char *digest_restart_calc;
} op_digest_cache_t;
op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
guint *interval_ms,
const pe_node_t *node,
const xmlNode *xml_op,
GHashTable *overrides,
bool calc_secure,
pe_working_set_t *data_set);
void pe__free_digests(gpointer ptr);
op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc,
const xmlNode *xml_op,
pe_node_t *node,
pe_working_set_t *data_set);
pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay,
pe_working_set_t *data_set);
void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node,
const char *reason, pe_action_t *dependency,
pe_working_set_t *data_set);
char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag);
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
void pe__add_action_expected_result(pe_action_t *action, int expected_result);
void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);
//! \deprecated This function will be removed in a future release
void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
void * print_data, gboolean print_all);
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pe_working_set_t * data_set);
//! \deprecated This function will be removed in a future release
void common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
const pe_node_t *node, long options, void *print_data);
int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
const char *name, const pe_node_t *node,
unsigned int options);
int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
const char *name, const pe_node_t *node,
unsigned int options);
GList *pe__bundle_containers(const pe_resource_t *bundle);
pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
const pe_node_t *node);
bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
pe_working_set_t *data_set,
xmlNode *xml, const char *field);
const char *pe_node_attribute_calculated(const pe_node_t *node,
const char *name,
const pe_resource_t *rsc);
const char *pe_node_attribute_raw(const pe_node_t *node, const char *name);
bool pe__is_universal_clone(const pe_resource_t *rsc,
const pe_working_set_t *data_set);
void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
pe_node_t *node, enum pe_check_parameters,
pe_working_set_t *data_set);
void pe__foreach_param_check(pe_working_set_t *data_set,
void (*cb)(pe_resource_t*, pe_node_t*,
const xmlNode*,
enum pe_check_parameters));
void pe__free_param_checks(pe_working_set_t *data_set);
bool pe__shutdown_requested(const pe_node_t *node);
void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
/*!
* \internal
* \brief Register xml formatting message functions.
*
* \param[in,out] out Output object to register messages with
*/
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
gboolean overwrite, pe_working_set_t *data_set);
bool pe__resource_is_disabled(const pe_resource_t *rsc);
pe_action_t *pe__clear_resource_history(pe_resource_t *rsc,
const pe_node_t *node,
pe_working_set_t *data_set);
GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s);
GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s);
bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name);
const char *pe__clone_child_id(const pe_resource_t *rsc);
int pe__sum_node_health_scores(const pe_node_t *node, int base_health);
int pe__node_health(pe_node_t *node);
static inline enum pcmk__health_strategy
pe__health_strategy(pe_working_set_t *data_set)
{
return pcmk__parse_health_strategy(pe_pref(data_set->config_hash,
PCMK__OPT_NODE_HEALTH_STRATEGY));
}
static inline int
pe__health_score(const char *option, pe_working_set_t *data_set)
{
return char2score(pe_pref(data_set->config_hash, option));
}
/*!
* \internal
* \brief Return a string suitable for logging as a node name
*
* \param[in] node Node to return a node name string for
*
* \return Node name if available, otherwise node ID if available,
* otherwise "unspecified node" if node is NULL or "unidentified node"
* if node has neither a name nor ID.
*/
static inline const char *
pe__node_name(const pe_node_t *node)
{
if (node == NULL) {
return "unspecified node";
} else if (node->details->uname != NULL) {
return node->details->uname;
} else if (node->details->id != NULL) {
return node->details->id;
} else {
return "unidentified node";
}
}
/*!
* \internal
* \brief Check whether two node objects refer to the same node
*
* \param[in] node1 First node object to compare
* \param[in] node2 Second node object to compare
*
* \return true if \p node1 and \p node2 refer to the same node
*/
static inline bool
pe__same_node(const pe_node_t *node1, const pe_node_t *node2)
{
return (node1 != NULL) && (node2 != NULL)
&& (node1->details == node2->details);
}
#endif
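The change in this header removes the pe__find_active_on() declaration and has
pe__current_node() dispatch through the resource's function table (fns->active_node)
instead. A minimal illustrative sketch of a caller follows; the function name is
invented and not part of the patch:

/* Hypothetical caller, for illustration only */
static void
log_active_node_example(const pe_resource_t *rsc)
{
    unsigned int count_all = 0;
    unsigned int count_clean = 0;
    pe_node_t *active = NULL;

    if (rsc != NULL) {
        // Dispatches to the variant implementation (native, group, clone, or bundle)
        active = rsc->fns->active_node(rsc, &count_all, &count_clean);
    }
    if (active != NULL) {
        crm_info("%s is active on %u node(s) (%u clean), for example on %s",
                 rsc->id, count_all, count_clean, pe__node_name(active));
    }
}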
diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
index 2b10ec097c..cbd5edb07b 100644
--- a/include/crm/pengine/pe_types.h
+++ b/include/crm/pengine/pe_types.h
@@ -1,544 +1,560 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_PENGINE_PE_TYPES__H
# define PCMK__CRM_PENGINE_PE_TYPES__H
# include <stdbool.h> // bool
# include <sys/types.h> // time_t
# include <libxml/tree.h> // xmlNode
# include <glib.h> // gboolean, guint, GList, GHashTable
# include <crm/common/iso8601.h>
# include <crm/pengine/common.h>
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief Data types for cluster status
* \ingroup pengine
*/
typedef struct pe_node_s pe_node_t;
typedef struct pe_action_s pe_action_t;
typedef struct pe_resource_s pe_resource_t;
typedef struct pe_working_set_s pe_working_set_t;
enum pe_obj_types {
pe_unknown = -1,
pe_native = 0,
pe_group = 1,
pe_clone = 2,
pe_container = 3,
};
typedef struct resource_object_functions_s {
gboolean (*unpack) (pe_resource_t*, pe_working_set_t*);
pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search,
const pe_node_t *node, int flags);
/* parameter result must be free'd */
char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*,
pe_working_set_t*);
//! \deprecated will be removed in a future release
void (*print) (pe_resource_t*, const char*, long, void*);
gboolean (*active) (pe_resource_t*, gboolean);
enum rsc_role_e (*state) (const pe_resource_t*, gboolean);
pe_node_t *(*location) (const pe_resource_t*, GList**, int);
void (*free) (pe_resource_t*);
void (*count) (pe_resource_t*);
gboolean (*is_filtered) (const pe_resource_t*, GList *, gboolean);
+
+ /*!
+ * \internal
+ * \brief Find a node (and optionally count all) where resource is active
+ *
+ * \param[in] rsc Resource to check
+ * \param[out] count_all If not NULL, set this to count of active nodes
+ * \param[out] count_clean If not NULL, set this to count of clean nodes
+ *
+ * \return A node where the resource is active, preferring the source node
+ * if the resource is involved in a partial migration, or a clean,
+ * online node if the resource's "requires" is "quorum" or
+ * "nothing", or NULL if the resource is inactive.
+ */
+ pe_node_t *(*active_node)(const pe_resource_t *rsc, unsigned int *count_all,
+ unsigned int *count_clean);
} resource_object_functions_t;
typedef struct resource_alloc_functions_s resource_alloc_functions_t;
enum pe_quorum_policy {
no_quorum_freeze,
no_quorum_stop,
no_quorum_ignore,
no_quorum_suicide,
no_quorum_demote
};
enum node_type {
node_ping, //! \deprecated Do not use
node_member,
node_remote
};
//! \deprecated will be removed in a future release
enum pe_restart {
pe_restart_restart, //! \deprecated will be removed in a future release
pe_restart_ignore //! \deprecated will be removed in a future release
};
//! Determine behavior of pe_find_resource_with_flags()
enum pe_find {
pe_find_renamed = 0x001, //!< match resource ID or LRM history ID
pe_find_anon = 0x002, //!< match base name of anonymous clone instances
pe_find_clone = 0x004, //!< match only clone instances
pe_find_current = 0x008, //!< match resource active on specified node
pe_find_inactive = 0x010, //!< match resource not running anywhere
pe_find_any = 0x020, //!< match base name of any clone instance
};
// @TODO Make these an enum
# define pe_flag_have_quorum 0x00000001ULL
# define pe_flag_symmetric_cluster 0x00000002ULL
# define pe_flag_maintenance_mode 0x00000008ULL
# define pe_flag_stonith_enabled 0x00000010ULL
# define pe_flag_have_stonith_resource 0x00000020ULL
# define pe_flag_enable_unfencing 0x00000040ULL
# define pe_flag_concurrent_fencing 0x00000080ULL
# define pe_flag_stop_rsc_orphans 0x00000100ULL
# define pe_flag_stop_action_orphans 0x00000200ULL
# define pe_flag_stop_everything 0x00000400ULL
# define pe_flag_start_failure_fatal 0x00001000ULL
//! \deprecated
# define pe_flag_remove_after_stop 0x00002000ULL
# define pe_flag_startup_fencing 0x00004000ULL
# define pe_flag_shutdown_lock 0x00008000ULL
# define pe_flag_startup_probes 0x00010000ULL
# define pe_flag_have_status 0x00020000ULL
# define pe_flag_have_remote_nodes 0x00040000ULL
# define pe_flag_quick_location 0x00100000ULL
# define pe_flag_sanitized 0x00200000ULL
//! \deprecated
# define pe_flag_stdout 0x00400000ULL
//! Don't count total, disabled and blocked resource instances
# define pe_flag_no_counts 0x00800000ULL
/*! Skip deprecated code that is kept solely for backward API compatibility.
* (Internal code should always set this.)
*/
# define pe_flag_no_compat 0x01000000ULL
# define pe_flag_show_scores 0x02000000ULL
# define pe_flag_show_utilization 0x04000000ULL
/*!
* When scheduling, only unpack the CIB (including constraints), calculate
* as much cluster status as possible, and apply node health.
*/
# define pe_flag_check_config 0x08000000ULL
struct pe_working_set_s {
xmlNode *input;
crm_time_t *now;
/* options extracted from the input */
char *dc_uuid;
pe_node_t *dc_node;
const char *stonith_action;
const char *placement_strategy;
unsigned long long flags;
int stonith_timeout;
enum pe_quorum_policy no_quorum_policy;
GHashTable *config_hash;
GHashTable *tickets;
// Actions for which there can be only one (e.g. fence nodeX)
GHashTable *singletons;
GList *nodes;
GList *resources;
GList *placement_constraints;
GList *ordering_constraints;
GList *colocation_constraints;
GList *ticket_constraints;
GList *actions;
xmlNode *failed;
xmlNode *op_defaults;
xmlNode *rsc_defaults;
/* stats */
int num_synapse;
int max_valid_nodes; //! Deprecated (will be removed in a future release)
int order_id;
int action_id;
/* final output */
xmlNode *graph;
GHashTable *template_rsc_sets;
const char *localhost;
GHashTable *tags;
int blocked_resources;
int disabled_resources;
GList *param_check; // History entries that need to be checked
GList *stop_needed; // Containers that need stop actions
time_t recheck_by; // Hint to controller to re-run scheduler by this time
int ninstances; // Total number of resource instances
guint shutdown_lock;// How long (seconds) to lock resources to shutdown node
int priority_fencing_delay; // Priority fencing delay
void *priv;
};
enum pe_check_parameters {
/* Clear fail count if parameters changed for un-expired start or monitor
* last_failure.
*/
pe_check_last_failure,
/* Clear fail count if parameters changed for start, monitor, promote, or
* migrate_from actions for active resources.
*/
pe_check_active,
};
struct pe_node_shared_s {
const char *id;
const char *uname;
enum node_type type;
/* @TODO convert these flags into a bitfield */
gboolean online;
gboolean standby;
gboolean standby_onfail;
gboolean pending;
gboolean unclean;
gboolean unseen;
gboolean shutdown;
gboolean expected_up;
gboolean is_dc;
gboolean maintenance;
gboolean rsc_discovery_enabled;
gboolean remote_requires_reset;
gboolean remote_was_fenced;
gboolean remote_maintenance; /* what the remote-rsc is thinking */
gboolean unpacked;
int num_resources;
pe_resource_t *remote_rsc;
GList *running_rsc; /* pe_resource_t* */
GList *allocated_rsc; /* pe_resource_t* */
GHashTable *attrs; /* char* => char* */
GHashTable *utilization;
GHashTable *digest_cache; //!< cache of calculated resource digests
int priority; // calculated based on the priority of resources running on the node
pe_working_set_t *data_set; //!< Cluster that this node is part of
};
struct pe_node_s {
int weight;
gboolean fixed; //!< \deprecated Will be removed in a future release
int count;
struct pe_node_shared_s *details;
int rsc_discover_mode;
};
# define pe_rsc_orphan 0x00000001ULL
# define pe_rsc_managed 0x00000002ULL
# define pe_rsc_block 0x00000004ULL
# define pe_rsc_orphan_container_filler 0x00000008ULL
# define pe_rsc_notify 0x00000010ULL
# define pe_rsc_unique 0x00000020ULL
# define pe_rsc_fence_device 0x00000040ULL
# define pe_rsc_promotable 0x00000080ULL
# define pe_rsc_provisional 0x00000100ULL
# define pe_rsc_allocating 0x00000200ULL
# define pe_rsc_merging 0x00000400ULL
# define pe_rsc_restarting 0x00000800ULL
# define pe_rsc_stop 0x00001000ULL
# define pe_rsc_reload 0x00002000ULL
# define pe_rsc_allow_remote_remotes 0x00004000ULL
# define pe_rsc_critical 0x00008000ULL
# define pe_rsc_failed 0x00010000ULL
# define pe_rsc_detect_loop 0x00020000ULL
# define pe_rsc_runnable 0x00040000ULL
# define pe_rsc_start_pending 0x00080000ULL
//!< \deprecated Do not use
# define pe_rsc_starting 0x00100000ULL
//!< \deprecated Do not use
# define pe_rsc_stopping 0x00200000ULL
# define pe_rsc_stop_unexpected 0x00400000ULL
# define pe_rsc_allow_migrate 0x00800000ULL
# define pe_rsc_failure_ignored 0x01000000ULL
# define pe_rsc_maintenance 0x04000000ULL
# define pe_rsc_is_container 0x08000000ULL
# define pe_rsc_needs_quorum 0x10000000ULL
# define pe_rsc_needs_fencing 0x20000000ULL
# define pe_rsc_needs_unfencing 0x40000000ULL
/* *INDENT-OFF* */
enum pe_action_flags {
pe_action_pseudo = 0x00001,
pe_action_runnable = 0x00002,
pe_action_optional = 0x00004,
pe_action_print_always = 0x00008,
pe_action_have_node_attrs = 0x00010,
pe_action_implied_by_stonith = 0x00040,
pe_action_migrate_runnable = 0x00080,
pe_action_dumped = 0x00100,
pe_action_processed = 0x00200,
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
pe_action_clear = 0x00400, //! \deprecated Unused
#endif
pe_action_dangle = 0x00800,
/* This action requires one or more of its dependencies to be runnable.
* We use this to clear the runnable flag before checking dependencies.
*/
pe_action_requires_any = 0x01000,
pe_action_reschedule = 0x02000,
pe_action_tracking = 0x04000,
pe_action_dedup = 0x08000, //! Internal state tracking when creating graph
pe_action_dc = 0x10000, //! Action may run on DC instead of target
};
/* *INDENT-ON* */
struct pe_resource_s {
char *id;
char *clone_name;
xmlNode *xml;
xmlNode *orig_xml;
xmlNode *ops_xml;
pe_working_set_t *cluster;
pe_resource_t *parent;
enum pe_obj_types variant;
void *variant_opaque;
resource_object_functions_t *fns;
resource_alloc_functions_t *cmds;
enum rsc_recovery_type recovery_type;
enum pe_restart restart_type; //!< \deprecated will be removed in future release
int priority;
int stickiness;
int sort_index;
int failure_timeout;
int migration_threshold;
guint remote_reconnect_ms;
char *pending_task;
unsigned long long flags;
// @TODO merge these into flags
gboolean is_remote_node;
gboolean exclusive_discover;
//!@{
//! This field should be treated as internal to Pacemaker
GList *rsc_cons_lhs; // List of pcmk__colocation_t*
GList *rsc_cons; // List of pcmk__colocation_t*
GList *rsc_location; // List of pe__location_t*
GList *actions; // List of pe_action_t*
GList *rsc_tickets; // List of rsc_ticket*
//!@}
pe_node_t *allocated_to;
pe_node_t *partial_migration_target;
pe_node_t *partial_migration_source;
GList *running_on; /* pe_node_t* */
GHashTable *known_on; /* pe_node_t* */
GHashTable *allowed_nodes; /* pe_node_t* */
enum rsc_role_e role;
enum rsc_role_e next_role;
GHashTable *meta;
GHashTable *parameters; //! \deprecated Use pe_rsc_params() instead
GHashTable *utilization;
GList *children; /* pe_resource_t* */
GList *dangling_migrations; /* pe_node_t* */
pe_resource_t *container;
GList *fillers;
// @COMPAT These should be made const at next API compatibility break
pe_node_t *pending_node; // Node on which pending_task is happening
pe_node_t *lock_node; // Resource is shutdown-locked to this node
time_t lock_time; // When shutdown lock started
/* Resource parameters may have node-attribute-based rules, which means the
* values can vary by node. This table is a cache of parameter name/value
* tables for each node (as needed). Use pe_rsc_params() to get the table
* for a given node.
*/
GHashTable *parameter_cache; // Key = node name, value = parameters table
};
struct pe_action_s {
int id;
int priority;
pe_resource_t *rsc;
pe_node_t *node;
xmlNode *op_entry;
char *task;
char *uuid;
char *cancel_task;
char *reason;
enum pe_action_flags flags;
enum rsc_start_requirement needs;
enum action_fail_response on_fail;
enum rsc_role_e fail_role;
GHashTable *meta;
GHashTable *extra;
/*
* These two variables are associated with the constraint logic
* that requires one or more other actions to be runnable before
* allowing this action to execute.
*
* These variables are used with features such as 'clone-min', which
* requires a minimum number of cloned instances to be running
* before an order dependency can run. Another option that uses
* this is 'require-all=false' in ordering constraints. This option
* says "only require one instance of a resource to start before
* allowing dependencies to start" -- basically, require-all=false is
* the same as clone-min=1.
*/
/* current number of known runnable actions in the before list. */
int runnable_before;
/* the number of "before" runnable actions required for this action
* to be considered runnable */
int required_runnable_before;
GList *actions_before; /* pe_action_wrapper_t* */
GList *actions_after; /* pe_action_wrapper_t* */
/* Some of the above fields could be moved to the details,
* except for API backward compatibility.
*/
void *action_details; // varies by type of action
};
typedef struct pe_ticket_s {
char *id;
gboolean granted;
time_t last_granted;
gboolean standby;
GHashTable *state;
} pe_ticket_t;
typedef struct pe_tag_s {
char *id;
GList *refs;
} pe_tag_t;
//! Internal tracking for transition graph creation
enum pe_link_state {
pe_link_not_dumped, //! Internal tracking for transition graph creation
pe_link_dumped, //! Internal tracking for transition graph creation
pe_link_dup, //! \deprecated No longer used by Pacemaker
};
enum pe_discover_e {
pe_discover_always = 0,
pe_discover_never,
pe_discover_exclusive,
};
/* *INDENT-OFF* */
enum pe_ordering {
pe_order_none = 0x0, /* deleted */
pe_order_optional = 0x1, /* pure ordering, nothing implied */
pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */
pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */
pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */
pe_order_promoted_implies_first = 0x40, /* If 'then' is required and then's rsc is promoted, ensure 'first' becomes required too */
/* first requires then to be both runnable and migrate runnable. */
pe_order_implies_first_migratable = 0x80,
pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */
pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */
pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX',
* ensure instances of 'then' on 'nodeX' are too.
* Only really useful if 'then' is a clone and 'first' is not
*/
pe_order_probe = 0x800, /* If 'first->rsc' is
* - running but about to stop, ignore the constraint
* - otherwise, behave as runnable_left
*/
pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */
pe_order_stonith_stop = 0x2000, //!< \deprecated Will be removed in future release
pe_order_serialize_only = 0x4000, /* serialize */
pe_order_same_node = 0x8000, /* applies only if 'first' and 'then' are on same node */
pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */
pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */
pe_order_asymmetrical = 0x100000, /* Indicates asymmetrical one way ordering constraint. */
pe_order_load = 0x200000, /* Only relevant if... */
pe_order_one_or_more = 0x400000, /* 'then' is runnable only if one or more of its dependencies are too */
pe_order_anti_colocation = 0x800000,
pe_order_preserve = 0x1000000, /* Hack for breaking user ordering constraints with container resources */
pe_order_then_cancels_first = 0x2000000, // if 'then' becomes required, 'first' becomes optional
pe_order_trace = 0x4000000, /* test marker */
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
// \deprecated Use pe_order_promoted_implies_first instead
pe_order_implies_first_master = pe_order_promoted_implies_first,
#endif
};
/* *INDENT-ON* */
typedef struct pe_action_wrapper_s {
enum pe_ordering type;
enum pe_link_state state;
pe_action_t *action;
} pe_action_wrapper_t;
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include <crm/pengine/pe_types_compat.h>
#endif
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_PENGINE_PE_TYPES__H
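To show where the new active_node member fits, here is a hedged sketch of a variant's
function table. The names my_variant_active_node and my_variant_fns are invented; the
real per-variant implementations are expected to live in lib/pengine and are not shown
in this diff.

static pe_node_t *
my_variant_active_node(const pe_resource_t *rsc, unsigned int *count_all,
                       unsigned int *count_clean)
{
    // Placeholder behavior for the sketch: report the resource as inactive
    if (count_all != NULL) {
        *count_all = 0;
    }
    if (count_clean != NULL) {
        *count_clean = 0;
    }
    return NULL;
}

static resource_object_functions_t my_variant_fns = {
    // Other members (unpack, find_rsc, print, ...) omitted for brevity
    .active_node = my_variant_active_node,
};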
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index 8927ec6c0a..736b6de3b2 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -1,1518 +1,1518 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void promote_resource(pe_resource_t *rsc, pe_node_t *node,
bool optional);
static void assert_role_error(pe_resource_t *rsc, pe_node_t *node,
bool optional);
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the immediate next role when transitioning from one role
* to a target role. For example, when going from Stopped to Promoted, the
* next role is Unpromoted, because the resource must be started before it
* can be promoted. The current role then becomes Unpromoted, which is fed
* into this array again, giving a next role of Promoted.
*
* Current role Immediate next role Final target role
* ------------ ------------------- -----------------
*/
/* Unknown */ { RSC_ROLE_UNKNOWN, /* Unknown */
RSC_ROLE_STOPPED, /* Stopped */
RSC_ROLE_STOPPED, /* Started */
RSC_ROLE_STOPPED, /* Unpromoted */
RSC_ROLE_STOPPED, /* Promoted */
},
/* Stopped */ { RSC_ROLE_STOPPED, /* Unknown */
RSC_ROLE_STOPPED, /* Stopped */
RSC_ROLE_STARTED, /* Started */
RSC_ROLE_UNPROMOTED, /* Unpromoted */
RSC_ROLE_UNPROMOTED, /* Promoted */
},
/* Started */ { RSC_ROLE_STOPPED, /* Unknown */
RSC_ROLE_STOPPED, /* Stopped */
RSC_ROLE_STARTED, /* Started */
RSC_ROLE_UNPROMOTED, /* Unpromoted */
RSC_ROLE_PROMOTED, /* Promoted */
},
/* Unpromoted */ { RSC_ROLE_STOPPED, /* Unknown */
RSC_ROLE_STOPPED, /* Stopped */
RSC_ROLE_STOPPED, /* Started */
RSC_ROLE_UNPROMOTED, /* Unpromoted */
RSC_ROLE_PROMOTED, /* Promoted */
},
/* Promoted */ { RSC_ROLE_STOPPED, /* Unknown */
RSC_ROLE_UNPROMOTED, /* Stopped */
RSC_ROLE_UNPROMOTED, /* Started */
RSC_ROLE_UNPROMOTED, /* Unpromoted */
RSC_ROLE_PROMOTED, /* Promoted */
},
};
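/* Illustrative sketch only (not part of this patch): walk rsc_state_matrix
 * from a current role to a valid target role (not RSC_ROLE_UNKNOWN), logging
 * each intermediate step. For example, Stopped -> Promoted passes through
 * Unpromoted. The helper name is invented; the real walk is done by
 * schedule_role_transition_actions() below.
 */
static void
trace_role_path(enum rsc_role_e current, enum rsc_role_e target)
{
    while (current != target) {
        enum rsc_role_e next = rsc_state_matrix[current][target];

        crm_trace("Role step: %s -> %s (final target %s)",
                  role2text(current), role2text(next), role2text(target));
        current = next;
    }
}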
/*!
* \internal
* \brief Function to schedule actions needed for a role change
*
* \param[in,out] rsc Resource whose role is changing
* \param[in,out] node Node where resource will be in its next role
* \param[in] optional Whether scheduled actions should be optional
*/
typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node,
bool optional);
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the function needed to transition directly from one role
* to another. NULL indicates that nothing is needed.
*
* Current role Transition function Next role
* ------------ ------------------- ----------
*/
/* Unknown */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
assert_role_error, /* Started */
assert_role_error, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Stopped */ { assert_role_error, /* Unknown */
NULL, /* Stopped */
start_resource, /* Started */
start_resource, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Started */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
NULL, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Unpromoted */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
stop_resource, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Promoted */ { assert_role_error, /* Unknown */
demote_resource, /* Stopped */
demote_resource, /* Started */
demote_resource, /* Unpromoted */
NULL, /* Promoted */
},
};
/*!
* \internal
* \brief Get a list of a resource's allowed nodes sorted by node weight
*
* \param[in] rsc Resource to check
*
* \return List of allowed nodes sorted by node weight
*/
static GList *
sorted_allowed_nodes(const pe_resource_t *rsc)
{
if (rsc->allowed_nodes != NULL) {
GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
if (nodes != NULL) {
return pcmk__sort_nodes(nodes, pe__current_node(rsc));
}
}
return NULL;
}
/*!
* \internal
* \brief Assign a resource to its best allowed node, if possible
*
* \param[in,out] rsc Resource to choose a node for
* \param[in] prefer If not NULL, prefer this node when all else equal
*
* \return true if \p rsc could be assigned to a node, otherwise false
*/
static bool
assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
{
GList *nodes = NULL;
pe_node_t *chosen = NULL;
pe_node_t *best = NULL;
bool result = false;
const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
if (prefer == NULL) {
prefer = most_free_node;
}
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
// We've already finished assignment of resources to nodes
return rsc->allocated_to != NULL;
}
// Sort allowed nodes by weight
nodes = sorted_allowed_nodes(rsc);
if (nodes != NULL) {
best = (pe_node_t *) nodes->data; // First node has best score
}
if ((prefer != NULL) && (nodes != NULL)) {
// Get the allowed node version of prefer
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
pe__node_name(prefer), rsc->id);
/* Favor the preferred node as long as its weight is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's weight is less than INFINITY.
*/
} else if (chosen->weight < best->weight) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
pe__node_name(chosen), rsc->id);
chosen = NULL;
} else if (!pcmk__node_available(chosen, true, false)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
pe__node_name(chosen), rsc->id);
chosen = NULL;
} else {
pe_rsc_trace(rsc,
"Chose preferred node %s for %s (ignoring %d candidates)",
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
}
if ((chosen == NULL) && (best != NULL)) {
/* Either there is no preferred node, or the preferred node is not
* suitable, but another node is allowed to run the resource.
*/
chosen = best;
if (!pe_rsc_is_unique_clone(rsc->parent)
&& (chosen->weight > 0) // Zero not acceptable
&& pcmk__node_available(chosen, false, false)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* pcmk__assign_instances() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unassigned instances to prefer a node that's already
* running another instance.
*/
pe_node_t *running = pe__current_node(rsc);
if (running == NULL) {
// Nothing to do
} else if (!pcmk__node_available(running, true, false)) {
pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
rsc->id, pe__node_name(running));
} else {
int nodes_with_best_score = 1;
for (GList *iter = nodes->next; iter; iter = iter->next) {
pe_node_t *allowed = (pe_node_t *) iter->data;
if (allowed->weight != chosen->weight) {
// The nodes are sorted by weight, so no more are equal
break;
}
if (pe__same_node(allowed, running)) {
// Scores are equal, so prefer the current node
chosen = allowed;
}
nodes_with_best_score++;
}
if (nodes_with_best_score > 1) {
do_crm_log(((chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO),
"Chose %s for %s from %d nodes with score %s",
pe__node_name(chosen), rsc->id,
nodes_with_best_score,
pcmk_readable_score(chosen->weight));
}
}
}
pe_rsc_trace(rsc, "Chose %s for %s from %d candidates",
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
result = pcmk__finalize_assignment(rsc, chosen, false);
g_list_free(nodes);
return result;
}
/*!
* \internal
* \brief Apply a "this with" colocation to a node's allowed node scores
*
* \param[in,out] data Colocation to apply
* \param[in,out] user_data Resource being assigned
*/
static void
apply_this_with(gpointer data, gpointer user_data)
{
pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
pe_resource_t *rsc = (pe_resource_t *) user_data;
GHashTable *archive = NULL;
pe_resource_t *other = colocation->primary;
// In certain cases, we will need to revert the node scores
if ((colocation->dependent_role >= RSC_ROLE_PROMOTED)
|| ((colocation->score < 0) && (colocation->score > -INFINITY))) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
pe_rsc_trace(rsc,
"%s: Assigning colocation %s primary %s first"
"(score=%d role=%s)",
rsc->id, colocation->id, other->id,
colocation->score, role2text(colocation->dependent_role));
other->cmds->assign(other, NULL);
// Apply the colocation score to this resource's allowed node scores
rsc->cmds->apply_coloc_score(rsc, other, colocation, true);
if ((archive != NULL)
&& !pcmk__any_node_available(rsc->allowed_nodes)) {
pe_rsc_info(rsc,
"%s: Reverting scores from colocation with %s "
"because no nodes allowed",
rsc->id, other->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive != NULL) {
g_hash_table_destroy(archive);
}
}
/*!
* \internal
* \brief Apply a "with this" colocation to a node's allowed node scores
*
* \param[in,out] data Colocation to apply
* \param[in,out] user_data Resource being assigned
*/
static void
apply_with_this(void *data, void *user_data)
{
pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
pe_resource_t *rsc = (pe_resource_t *) user_data;
pe_resource_t *other = colocation->dependent;
const float factor = colocation->score / (float) INFINITY;
if (!pcmk__colocation_has_influence(colocation, NULL)) {
return;
}
pe_rsc_trace(rsc,
"%s: Incorporating attenuated %s assignment scores due "
"to colocation %s", rsc->id, other->id, colocation->id);
pcmk__add_colocated_node_scores(other, rsc->id, &rsc->allowed_nodes,
colocation->node_attribute, factor,
pcmk__coloc_select_active);
}
/*!
* \internal
* \brief Update a Pacemaker Remote node once its connection has been assigned
*
* \param[in] connection Connection resource that has been assigned
*/
static void
remote_connection_assigned(const pe_resource_t *connection)
{
pe_node_t *remote_node = pe_find_node(connection->cluster->nodes,
connection->id);
CRM_CHECK(remote_node != NULL, return);
if ((connection->allocated_to != NULL)
&& (connection->next_role != RSC_ROLE_STOPPED)) {
crm_trace("Pacemaker Remote node %s will be online",
remote_node->details->id);
remote_node->details->online = TRUE;
if (remote_node->details->unseen) {
// Avoid unnecessary fence, since we will attempt connection
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Pacemaker Remote node %s will be shut down "
"(%sassigned connection's next role is %s)",
remote_node->details->id,
((connection->allocated_to == NULL)? "un" : ""),
role2text(connection->next_role));
remote_node->details->shutdown = TRUE;
}
}
/*!
* \internal
* \brief Assign a primitive resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*/
pe_node_t *
pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
{
CRM_ASSERT(rsc != NULL);
// Never assign a child without parent being assigned first
if ((rsc->parent != NULL)
&& !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "%s: Assigning parent %s first",
rsc->id, rsc->parent->id);
rsc->parent->cmds->assign(rsc->parent, prefer);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return rsc->allocated_to; // Assignment has already been done
}
// Ensure we detect assignment loops
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
pe__show_node_weights(true, rsc, "Pre-assignment", rsc->allowed_nodes,
rsc->cluster);
g_list_foreach(rsc->rsc_cons, apply_this_with, rsc);
pe__show_node_weights(true, rsc, "Post-this-with", rsc->allowed_nodes,
rsc->cluster);
g_list_foreach(rsc->rsc_cons_lhs, apply_with_this, rsc);
if (rsc->next_role == RSC_ROLE_STOPPED) {
pe_rsc_trace(rsc,
"Banning %s from all nodes because it will be stopped",
rsc->id);
resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE,
rsc->cluster);
} else if ((rsc->next_role > rsc->role)
&& !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum)
&& (rsc->cluster->no_quorum_policy == no_quorum_freeze)) {
crm_notice("Resource %s cannot be elevated from %s to %s due to "
"no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
}
pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, rsc->cluster);
// Unmanage resource if fencing is enabled but no device is configured
if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
&& !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) {
pe__clear_resource_flags(rsc, pe_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
// Unmanaged resources stay on their current node
const char *reason = NULL;
pe_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == RSC_ROLE_PROMOTED) {
reason = "promoted";
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
pcmk__finalize_assignment(rsc, assign_to, true);
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id);
pcmk__finalize_assignment(rsc, NULL, true);
} else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
&& assign_best_node(rsc, prefer)) {
// Assignment successful
} else if (rsc->allocated_to == NULL) {
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if (rsc->running_on != NULL) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
} else {
pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id,
pe__node_name(rsc->allocated_to));
}
pe__clear_resource_flags(rsc, pe_rsc_allocating);
if (rsc->is_remote_node) {
remote_connection_assigned(rsc);
}
return rsc->allocated_to;
}
/*!
* \internal
* \brief Schedule actions to bring resource down and back to current role
*
* \param[in,out] rsc Resource to restart
* \param[in,out] current Node that resource should be brought down on
* \param[in] need_stop Whether the resource must be stopped
* \param[in] need_promote Whether the resource must be promoted
*/
static void
schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
bool need_stop, bool need_promote)
{
enum rsc_role_e role = rsc->role;
enum rsc_role_e next_role;
rsc_transition_fn fn = NULL;
pe__set_resource_flags(rsc, pe_rsc_restarting);
// Bring resource down to a stop on its current node
while (role != RSC_ROLE_STOPPED) {
next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
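// The matrix function's third argument is "optional", so this teardown action is mandatory only when need_stop is set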
fn(rsc, current, !need_stop);
role = next_role;
}
// Bring resource up to its next role on its next node
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
&& !pcmk_is_set(rsc->flags, pe_rsc_block)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
required = true;
}
pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
(required? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
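// As above, the third argument is "optional": this bring-up action is mandatory only when required is set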
fn(rsc, rsc->allocated_to, !required);
role = next_role;
}
pe__clear_resource_flags(rsc, pe_rsc_restarting);
}
/*!
* \internal
* \brief If a resource's next role is not explicitly specified, set a default
*
* \param[in,out] rsc Resource to set next role for
*
* \return "explicit" if next role was explicitly set, otherwise "implicit"
*/
static const char *
set_default_next_role(pe_resource_t *rsc)
{
if (rsc->next_role != RSC_ROLE_UNKNOWN) {
return "explicit";
}
if (rsc->allocated_to == NULL) {
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "assignment");
} else {
pe__set_next_role(rsc, RSC_ROLE_STARTED, "assignment");
}
return "implicit";
}
/*!
* \internal
* \brief Create an action to represent an already pending start
*
* \param[in,out] rsc Resource to create start action for
*/
static void
create_pending_start(pe_resource_t *rsc)
{
pe_action_t *start = NULL;
pe_rsc_trace(rsc,
"Creating action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
pe__set_action_flags(start, pe_action_print_always);
}
/*!
* \internal
* \brief Schedule actions needed to take a resource to its next role
*
* \param[in,out] rsc Resource to schedule actions for
*/
static void
schedule_role_transition_actions(pe_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
while (role != rsc->next_role) {
enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role];
rsc_transition_fn fn = NULL;
pe_rsc_trace(rsc,
"Creating action to take %s from %s to %s (ending at %s)",
rsc->id, role2text(role), role2text(next_role),
role2text(rsc->next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, rsc->allocated_to, false);
role = next_role;
}
}
/*!
* \internal
* \brief Create all actions needed for a given primitive resource
*
* \param[in,out] rsc Primitive resource to create actions for
*/
void
pcmk__primitive_create_actions(pe_resource_t *rsc)
{
bool need_stop = false;
bool need_promote = false;
bool is_moving = false;
bool allow_migrate = false;
bool multiply_active = false;
pe_node_t *current = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
const char *next_role_source = NULL;
CRM_ASSERT(rsc != NULL);
next_role_source = set_default_next_role(rsc);
pe_rsc_trace(rsc,
"Creating all actions for %s transition from %s to %s "
"(%s) on %s",
rsc->id, role2text(rsc->role), role2text(rsc->next_role),
next_role_source, pe__node_name(rsc->allocated_to));
- current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
+ current = rsc->fns->active_node(rsc, &num_all_active, &num_clean_active);
g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration,
rsc);
if ((current != NULL) && (rsc->allocated_to != NULL)
&& (current->details != rsc->allocated_to->details)
&& (rsc->next_role >= RSC_ROLE_STARTED)) {
pe_rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, pe__node_name(current),
pe__node_name(rsc->allocated_to));
is_moving = true;
allow_migrate = pcmk__rsc_can_migrate(rsc, current);
// This is needed even if migrating (though I'm not sure why ...)
need_stop = true;
}
// Check whether resource is partially migrated and/or multiply active
if ((rsc->partial_migration_source != NULL)
&& (rsc->partial_migration_target != NULL)
&& allow_migrate && (num_all_active == 2)
&& pe__same_node(current, rsc->partial_migration_source)
&& pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) {
/* A partial migration is in progress, and the migration target remains
* the same as when the migration began.
*/
pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue",
rsc->id, pe__node_name(rsc->partial_migration_source),
pe__node_name(rsc->partial_migration_target));
} else if ((rsc->partial_migration_source != NULL)
|| (rsc->partial_migration_target != NULL)) {
// A partial migration is in progress but can't be continued
if (num_all_active > 2) {
// The resource is migrating *and* multiply active!
crm_notice("Forcing recovery of %s because it is migrating "
"from %s to %s and possibly active elsewhere",
rsc->id, pe__node_name(rsc->partial_migration_source),
pe__node_name(rsc->partial_migration_target));
} else {
// The migration source or target isn't available
crm_notice("Forcing recovery of %s because it can no longer "
"migrate from %s to %s",
rsc->id, pe__node_name(rsc->partial_migration_source),
pe__node_name(rsc->partial_migration_target));
}
need_stop = true;
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = false;
} else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
multiply_active = (num_all_active > 1);
} else {
/* If a resource has "requires" set to nothing or quorum, don't consider
* it active on unclean nodes (similar to how all resources behave when
* stonith-enabled is false). We can start such resources elsewhere
* before fencing completes, and if we considered the resource active on
* the failed node, we would attempt recovery for being active on
* multiple nodes.
*/
multiply_active = (num_clean_active > 1);
}
if (multiply_active) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Resource was (possibly) incorrectly multiply active
pe_proc_err("%s resource %s might be active on %u nodes (%s)",
pcmk__s(class, "Untyped"), rsc->id, num_all_active,
recovery2text(rsc->recovery_type));
crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ"
"#Resource_is_Too_Active for more information");
switch (rsc->recovery_type) {
case recovery_stop_start:
need_stop = true;
break;
case recovery_stop_unexpected:
need_stop = true; // stop_resource() will skip expected node
pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
break;
default:
break;
}
} else {
pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
}
if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
create_pending_start(rsc);
}
if (is_moving) {
// Remaining tests are only for resources staying where they are
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
need_stop = true;
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
if (rsc->next_role == RSC_ROLE_PROMOTED) {
need_promote = true;
}
}
} else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = true;
} else if ((rsc->role > RSC_ROLE_STARTED) && (current != NULL)
&& (rsc->allocated_to != NULL)) {
pe_action_t *start = NULL;
pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
if (!pcmk_is_set(start->flags, pe_action_optional)) {
// Recovery of a promoted resource
pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = true;
}
}
// Create any actions needed to bring resource down and back up to same role
schedule_restart_actions(rsc, current, need_stop, need_promote);
// Create any actions needed to take resource from this role to the next
schedule_role_transition_actions(rsc);
pcmk__create_recurring_actions(rsc);
if (allow_migrate) {
pcmk__create_migration_actions(rsc, current);
}
}
/*!
* \internal
* \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes
*
* \param[in] rsc Resource to check
*/
static void
rsc_avoids_remote_nodes(const pe_resource_t *rsc)
{
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (node->details->remote_rsc != NULL) {
node->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(const pe_resource_t *rsc)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
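// Sort only when not running as a daemon (for example, for simulated/regression-test output as noted above), avoiding the cost on a live cluster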
if (!pcmk__is_daemon) {
allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
}
return allowed_nodes;
}
/*!
* \internal
* \brief Create implicit constraints needed for a primitive resource
*
* \param[in,out] rsc Primitive resource to create implicit constraints for
*/
void
pcmk__primitive_internal_constraints(pe_resource_t *rsc)
{
GList *allowed_nodes = NULL;
bool check_unfencing = false;
bool check_utilization = false;
CRM_ASSERT(rsc != NULL);
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping implicit constraints for unmanaged resource %s",
rsc->id);
return;
}
// Whether resource requires unfencing
check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
&& pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)
&& pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& !pcmk__str_eq(rsc->cluster->placement_strategy,
"default", pcmk__str_casei);
// Order stops before starts (i.e. restart)
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
pe_order_optional|pe_order_implies_then|pe_order_restart,
rsc->cluster);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pe_rsc_promotable)
|| (rsc->role > RSC_ROLE_UNPROMOTED)) {
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
pe_order_promoted_implies_first, rsc->cluster);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
pe_order_runnable_left, rsc->cluster);
}
// Don't clear resource history if probing on same node
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
NULL, pe_order_same_node|pe_order_then_cancels_first,
rsc->cluster);
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || (rsc->container != NULL)) {
allowed_nodes = allowed_nodes_as_list(rsc);
}
if (check_unfencing) {
g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc);
}
if (check_utilization) {
pcmk__create_utilization_constraints(rsc, allowed_nodes);
}
if (rsc->container != NULL) {
pe_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Guest resources are not allowed to run on Pacemaker Remote nodes,
* to avoid nesting remotes. However, bundles are allowed.
*/
if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc,
RSC_STOP, pe_order_optional);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(rsc->cluster,
rsc->container);
}
if (remote_rsc != NULL) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
pcmk__new_ordering(rsc->container,
pcmk__op_key(rsc->container->id, RSC_START, 0),
NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
NULL,
pe_order_implies_then|pe_order_runnable_left,
rsc->cluster);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
rsc->container,
pcmk__op_key(rsc->container->id, RSC_STOP, 0),
NULL, pe_order_implies_first, rsc->cluster);
if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
pcmk__new_colocation("resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL, true,
rsc->cluster);
}
}
if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
/* Remote connections and fencing devices are not allowed to run on
* Pacemaker Remote nodes
*/
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
/*!
* \internal
* \brief Apply a colocation's score to node weights or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node weights (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
enum pcmk__coloc_affects filter_results;
CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
return);
if (for_dependent) {
// Always process on behalf of primary resource
primary->cmds->apply_coloc_score(dependent, primary, colocation, false);
return;
}
filter_results = pcmk__colocation_affects(dependent, primary, colocation,
false);
pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
((colocation->score > 0)? "Colocating" : "Anti-colocating"),
dependent->id, primary->id, colocation->id, colocation->score,
filter_results);
switch (filter_results) {
case pcmk__coloc_affects_role:
pcmk__apply_coloc_to_priority(dependent, primary, colocation);
break;
case pcmk__coloc_affects_location:
pcmk__apply_coloc_to_weights(dependent, primary, colocation);
break;
default: // pcmk__coloc_affects_nothing
return;
}
}
/*!
* \internal
* \brief Return action flags for a given primitive resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node (ignored)
*
* \return Flags appropriate to \p action on \p node
*/
enum pe_action_flags
pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
{
CRM_ASSERT(action != NULL);
return action->flags;
}
/*!
* \internal
* \brief Check whether a node is a multiply active resource's expected node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return true if \p rsc is multiply active with multiple-active set to
* stop_unexpected, and \p node is the node where it will remain active
* \note This assumes that the resource's next role cannot be changed to stopped
* after this is called, which should be reasonable if status has already
* been unpacked and resources have been assigned to nodes.
*/
static bool
is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
{
return pcmk_all_flags_set(rsc->flags,
pe_rsc_stop_unexpected|pe_rsc_restarting)
&& (rsc->next_role > RSC_ROLE_STOPPED)
&& pe__same_node(rsc->allocated_to, node);
}
/*!
* \internal
* \brief Schedule actions needed to stop a resource wherever it is active
*
* \param[in,out] rsc Resource being stopped
* \param[in] node Node where resource is being stopped (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *current = (pe_node_t *) iter->data;
pe_action_t *stop = NULL;
if (is_expected_node(rsc, current)) {
/* We are scheduling restart actions for a multiply active resource
* with multiple-active=stop_unexpected, and this is where it should
* not be stopped.
*/
pe_rsc_trace(rsc,
"Skipping stop of multiply active resource %s "
"on expected node %s",
rsc->id, pe__node_name(current));
continue;
}
if (rsc->partial_migration_target != NULL) {
// Continue migration if node originally was and remains target
if (pe__same_node(current, rsc->partial_migration_target)
&& pe__same_node(current, rsc->allocated_to)) {
pe_rsc_trace(rsc,
"Skipping stop of %s on %s "
"because partial migration there will continue",
rsc->id, pe__node_name(current));
continue;
} else {
pe_rsc_trace(rsc,
"Forcing stop of %s on %s "
"because migration target changed",
rsc->id, pe__node_name(current));
optional = false;
}
}
pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
rsc->id, pe__node_name(current));
stop = stop_action(rsc, current, optional);
if (rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", true);
} else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
|pe_rsc_stop_unexpected)) {
/* We are stopping a multiply active resource on a node that is
* not its expected node, and we are still scheduling restart
* actions, so the stop is for being multiply active.
*/
pe_action_set_reason(stop, "being multiply active", true);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe__clear_action_flags(stop, pe_action_runnable);
}
if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
pcmk__schedule_cleanup(rsc, current, optional);
}
if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
pe_action_t *unfence = pe_fence_op(current, "on", true, NULL, false,
rsc->cluster);
order_actions(stop, unfence, pe_order_implies_first);
if (!pcmk__node_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced",
rsc->id, pe__node_name(current));
}
}
}
}
/*!
* \internal
* \brief Schedule actions needed to start a resource on a node
*
* \param[in,out] rsc Resource being started
* \param[in,out] node Node where resource should be started
* \param[in] optional Whether actions should be optional
*/
static void
start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
pe_action_t *start = NULL;
CRM_ASSERT(node != NULL);
pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)",
(optional? "optional" : "required"), rsc->id,
pe__node_name(node), node->weight);
start = start_action(rsc, node, TRUE);
pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then);
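// The start was created as optional above; make it required if the caller asked for a required start and it is runnable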
if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
pe__clear_action_flags(start, pe_action_optional);
}
if (is_expected_node(rsc, node)) {
/* This could be a problem if the start becomes necessary for other
* reasons later.
*/
pe_rsc_trace(rsc,
"Start of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
pe__set_action_flags(start, pe_action_pseudo);
}
}
/*!
* \internal
* \brief Schedule actions needed to promote a resource on a node
*
* \param[in,out] rsc Resource being promoted
* \param[in] node Node where resource should be promoted
* \param[in] optional Whether actions should be optional
*/
static void
promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
GList *iter = NULL;
GList *action_list = NULL;
bool runnable = true;
CRM_ASSERT(node != NULL);
// Any start must be runnable for promotion to be runnable
action_list = pe__resource_actions(rsc, node, RSC_START, true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pe_action_t *start = (pe_action_t *) iter->data;
if (!pcmk_is_set(start->flags, pe_action_runnable)) {
runnable = false;
}
}
g_list_free(action_list);
if (runnable) {
pe_action_t *promote = promote_action(rsc, node, optional);
pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pe__node_name(node));
if (is_expected_node(rsc, node)) {
/* This could be a problem if the promote becomes necessary for
* other reasons later.
*/
pe_rsc_trace(rsc,
"Promotion of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
pe__set_action_flags(promote, pe_action_pseudo);
}
} else {
pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
rsc->id, pe__node_name(node));
action_list = pe__resource_actions(rsc, node, RSC_PROMOTE, true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pe_action_t *promote = (pe_action_t *) iter->data;
pe__clear_action_flags(promote, pe_action_runnable);
}
g_list_free(action_list);
}
}
/*!
* \internal
* \brief Schedule actions needed to demote a resource wherever it is active
*
* \param[in,out] rsc Resource being demoted
* \param[in] node Node where resource should be demoted (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
/* Since this will only be called for a primitive (possibly as an instance
* of a collective resource), the resource is multiply active if it is
* running on more than one node, so we want to demote on all of them as
* part of recovery, regardless of which one is the desired node.
*/
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *current = (pe_node_t *) iter->data;
if (is_expected_node(rsc, current)) {
pe_rsc_trace(rsc,
"Skipping demote of multiply active resource %s "
"on expected node %s",
rsc->id, pe__node_name(current));
} else {
pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pe__node_name(current));
demote_action(rsc, current, optional);
}
}
}
static void
assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
CRM_ASSERT(false);
}
/*!
* \internal
* \brief Schedule cleanup of a resource
*
* \param[in,out] rsc Resource to clean up
* \param[in] node Node to clean up on
* \param[in] optional Whether clean-up should be optional
*/
void
pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
{
/* If the cleanup is required, its orderings are optional, because they're
* relevant only if both actions are required. Conversely, if the cleanup is
* optional, the orderings make the then action required if the first action
* becomes required.
*/
uint32_t flag = optional? pe_order_implies_then : pe_order_optional;
CRM_CHECK((rsc != NULL) && (node != NULL), return);
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
rsc->id, pe__node_name(node));
return;
}
if (node->details->unclean || !node->details->online) {
pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable",
rsc->id, pe__node_name(node));
return;
}
crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node));
delete_action(rsc, node, optional);
// stop -> clean-up -> start
pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE, flag);
pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START, flag);
}
/*!
* \internal
* \brief Add primitive meta-attributes relevant to graph actions to XML
*
* \param[in] rsc Primitive resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
char *value = NULL;
const pe_resource_t *parent = NULL;
CRM_ASSERT((rsc != NULL) && (xml != NULL));
/* Clone instance numbers get set internally as meta-attributes, and are
* needed in the transition graph (for example, to tell unique clone
* instances apart).
*/
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
if (value != NULL) {
name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
crm_xml_add(xml, name, value);
free(name);
}
// Not sure if this one is really needed ...
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
if (value != NULL) {
name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
/* The container meta-attribute can be set on the primitive itself or one of
* its parents (for example, a group inside a container resource), so check
* them all, and keep the highest one found.
*/
for (parent = rsc; parent != NULL; parent = parent->parent) {
if (parent->container != NULL) {
crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER,
parent->container->id);
}
}
/* Bundle replica children will get their external-ip set internally as a
* meta-attribute. The graph action needs it, but under a different naming
* convention than other meta-attributes.
*/
value = g_hash_table_lookup(rsc->meta, "external-ip");
if (value != NULL) {
crm_xml_add(xml, "pcmk_external_ip", value);
}
}
// Primitive implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__primitive_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
orig_rsc->id, rsc->id);
pcmk__release_node_capacity(utilization, rsc);
}
/*!
* \internal
* \brief Get epoch time of node's shutdown attribute (or now if none)
*
* \param[in,out] node Node to check
*
* \return Epoch time corresponding to shutdown attribute if set or now if not
*/
static time_t
shutdown_time(pe_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
time_t result = 0;
if (shutdown != NULL) {
long long result_ll;
if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
result = (time_t) result_ll;
}
}
return (result == 0)? get_effective_time(node->details->data_set) : result;
}
/*!
* \internal
* \brief Ban a resource from a node if it's not locked to the node
*
* \param[in] data Node to check
* \param[in,out] user_data Resource to check
*/
static void
ban_if_not_locked(gpointer data, gpointer user_data)
{
const pe_node_t *node = (const pe_node_t *) data;
pe_resource_t *rsc = (pe_resource_t *) user_data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) {
resource_location(rsc, node, -CRM_SCORE_INFINITY,
XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
}
}
// Primitive implementation of resource_alloc_functions_t:shutdown_lock()
void
pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Fence devices and remote connections can't be locked
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
|| pe__resource_is_remote_conn(rsc, rsc->cluster)) {
return;
}
if (rsc->lock_node != NULL) {
// The lock was obtained from resource history
if (rsc->running_on != NULL) {
/* The resource was started elsewhere even though it is now
* considered locked. This shouldn't be possible, but as a
* failsafe, we don't want to disturb the resource now.
*/
pe_rsc_info(rsc,
"Cancelling shutdown lock because %s is already active",
rsc->id);
pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
pe_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
rsc->id, pe__node_name(node));
} else {
rsc->lock_node = node;
rsc->lock_time = shutdown_time(node);
}
}
}
if (rsc->lock_node == NULL) {
// No lock needed
return;
}
if (rsc->cluster->shutdown_lock > 0) {
time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;
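// shutdown_lock holds the configured shutdown-lock-limit in seconds (0, handled in the else branch, means the lock never expires)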
pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, pe__node_name(rsc->lock_node),
(long long) lock_expiration);
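// Schedule a cluster re-check for one second after the lock expires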
pe__update_recheck_time(++lock_expiration, rsc->cluster);
} else {
pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, pe__node_name(rsc->lock_node));
}
// If resource is locked to one node, ban it from all other nodes
g_list_foreach(rsc->cluster->nodes, ban_if_not_locked, rsc);
}
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index 9f2e70a75a..7a023a8366 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -1,1088 +1,1088 @@
/*
* Copyright 2014-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdlib.h>
#include <string.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
// Resource allocation methods that vary by resource variant
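// Indexed by rsc->variant (primitive, group, clone, bundle); see set_allocation_methods_for_rsc()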
static resource_alloc_functions_t allocation_methods[] = {
{
pcmk__primitive_assign,
pcmk__primitive_create_actions,
pcmk__probe_rsc_on_node,
pcmk__primitive_internal_constraints,
pcmk__primitive_apply_coloc_score,
pcmk__colocated_resources,
pcmk__apply_location,
pcmk__primitive_action_flags,
pcmk__update_ordered_actions,
pcmk__output_resource_actions,
pcmk__add_rsc_actions_to_graph,
pcmk__primitive_add_graph_meta,
pcmk__primitive_add_utilization,
pcmk__primitive_shutdown_lock,
},
{
pcmk__group_assign,
pcmk__group_create_actions,
pcmk__probe_rsc_on_node,
pcmk__group_internal_constraints,
pcmk__group_apply_coloc_score,
pcmk__group_colocated_resources,
pcmk__group_apply_location,
pcmk__group_action_flags,
pcmk__group_update_ordered_actions,
pcmk__output_resource_actions,
pcmk__add_rsc_actions_to_graph,
pcmk__noop_add_graph_meta,
pcmk__group_add_utilization,
pcmk__group_shutdown_lock,
},
{
pcmk__clone_allocate,
clone_create_actions,
clone_create_probe,
clone_internal_constraints,
pcmk__clone_apply_coloc_score,
pcmk__colocated_resources,
clone_rsc_location,
clone_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_resource_actions,
clone_expand,
clone_append_meta,
pcmk__clone_add_utilization,
pcmk__clone_shutdown_lock,
},
{
pcmk__bundle_allocate,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
pcmk__bundle_apply_coloc_score,
pcmk__colocated_resources,
pcmk__bundle_rsc_location,
pcmk__bundle_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_bundle_actions,
pcmk__bundle_expand,
pcmk__noop_add_graph_meta,
pcmk__bundle_add_utilization,
pcmk__bundle_shutdown_lock,
}
};
/*!
* \internal
* \brief Check whether a resource's agent standard, provider, or type changed
*
* \param[in,out] rsc Resource to check
* \param[in,out] node Node needing unfencing if agent changed
* \param[in] rsc_entry XML with previously known agent information
* \param[in] active_on_node Whether \p rsc is active on \p node
*
* \return true if agent for \p rsc changed, otherwise false
*/
bool
pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
const xmlNode *rsc_entry, bool active_on_node)
{
bool changed = false;
const char *attr_list[] = {
XML_ATTR_TYPE,
XML_AGENT_ATTR_CLASS,
XML_AGENT_ATTR_PROVIDER
};
for (int i = 0; i < PCMK__NELEM(attr_list); i++) {
const char *value = crm_element_value(rsc->xml, attr_list[i]);
const char *old_value = crm_element_value(rsc_entry, attr_list[i]);
if (!pcmk__str_eq(value, old_value, pcmk__str_none)) {
changed = true;
trigger_unfencing(rsc, node, "Device definition changed", NULL,
rsc->cluster);
if (active_on_node) {
crm_notice("Forcing restart of %s on %s "
"because %s changed from '%s' to '%s'",
rsc->id, pe__node_name(node), attr_list[i],
pcmk__s(old_value, ""), pcmk__s(value, ""));
}
}
}
if (changed && active_on_node) {
// Make sure the resource is restarted
custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
rsc->cluster);
pe__set_resource_flags(rsc, pe_rsc_start_pending);
}
return changed;
}
/*!
* \internal
* \brief Add resource (and any matching children) to list if it matches ID
*
* \param[in] result List to add resource to
* \param[in] rsc Resource to check
* \param[in] id ID to match
*
* \return (Possibly new) head of list
*/
static GList *
add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
{
if ((strcmp(rsc->id, id) == 0)
|| ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) {
result = g_list_prepend(result, rsc);
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *) iter->data;
result = add_rsc_if_matching(result, child, id);
}
return result;
}
/*!
* \internal
* \brief Find all resources matching a given ID by either ID or clone name
*
* \param[in] id Resource ID to check
* \param[in] data_set Cluster working set
*
* \return List of all resources that match \p id
* \note The caller is responsible for freeing the return value with
* g_list_free().
*/
GList *
pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set)
{
GList *result = NULL;
CRM_CHECK((id != NULL) && (data_set != NULL), return NULL);
for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
result = add_rsc_if_matching(result, (pe_resource_t *) iter->data, id);
}
return result;
}
/*!
* \internal
* \brief Set the variant-appropriate allocation methods for a resource
*
* \param[in,out] rsc Resource to set allocation methods for
* \param[in] ignored Here so function can be used with g_list_foreach()
*/
static void
set_allocation_methods_for_rsc(pe_resource_t *rsc, void *ignored)
{
rsc->cmds = &allocation_methods[rsc->variant];
g_list_foreach(rsc->children, (GFunc) set_allocation_methods_for_rsc, NULL);
}
/*!
* \internal
* \brief Set the variant-appropriate allocation methods for all resources
*
* \param[in,out] data_set Cluster working set
*/
void
pcmk__set_allocation_methods(pe_working_set_t *data_set)
{
g_list_foreach(data_set->resources, (GFunc) set_allocation_methods_for_rsc,
NULL);
}
// Shared implementation of resource_alloc_functions_t:colocated_resources()
GList *
pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc,
GList *colocated_rscs)
{
const GList *iter = NULL;
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) {
return colocated_rscs;
}
pe_rsc_trace(orig_rsc, "%s is in colocation chain with %s",
rsc->id, orig_rsc->id);
colocated_rscs = g_list_append(colocated_rscs, (gpointer) rsc);
// Follow colocations where this resource is the dependent resource
for (iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
const pe_resource_t *primary = constraint->primary;
if (primary == orig_rsc) {
continue; // Break colocation loop
}
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(rsc, primary, constraint,
true) == pcmk__coloc_affects_location)) {
colocated_rscs = primary->cmds->colocated_resources(primary,
orig_rsc,
colocated_rscs);
}
}
// Follow colocations where this resource is the primary resource
for (iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
const pe_resource_t *dependent = constraint->dependent;
if (dependent == orig_rsc) {
continue; // Break colocation loop
}
if (pe_rsc_is_clone(rsc) && !pe_rsc_is_clone(dependent)) {
continue; // We can't be sure whether dependent will be colocated
}
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(dependent, rsc, constraint,
true) == pcmk__coloc_affects_location)) {
colocated_rscs = dependent->cmds->colocated_resources(dependent,
orig_rsc,
colocated_rscs);
}
}
return colocated_rscs;
}
// No-op function for variants that don't need to implement add_graph_meta()
void
pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
{
}
void
pcmk__output_resource_actions(pe_resource_t *rsc)
{
pcmk__output_t *out = rsc->cluster->priv;
pe_node_t *next = NULL;
pe_node_t *current = NULL;
if (rsc->children != NULL) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *) iter->data;
child->cmds->output_actions(child);
}
return;
}
next = rsc->allocated_to;
if (rsc->running_on) {
current = pe__current_node(rsc);
if (rsc->role == RSC_ROLE_STOPPED) {
/* This can occur when resources are being recovered because
* the current role can change in pcmk__primitive_create_actions()
*/
rsc->role = RSC_ROLE_STARTED;
}
}
if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't log stopped orphans */
return;
}
out->message(out, "rsc-action", rsc, current, next);
}
/*!
* \internal
* \brief Assign a specified primitive resource to a node
*
* Assign a specified primitive resource to a specified node, if the node can
* run the resource (or unconditionally, if \p force is true). Mark the resource
* as no longer provisional. If the primitive can't be assigned (or \p chosen is
* NULL), unassign any previous assignment for it, set its next role to stopped,
* and update any existing actions scheduled for it. This is not done
* recursively for children, so it should be called only for primitives.
*
* \param[in,out] rsc Resource to assign
* \param[in,out] chosen Node to assign \p rsc to
* \param[in] force If true, assign to \p chosen even if unavailable
*
* \return true if \p rsc could be assigned, otherwise false
*
* \note Assigning a resource to the NULL node using this function is different
* from calling pcmk__unassign_resource(), in that it will also update any
* actions created for the resource.
*/
bool
pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, bool force)
{
pcmk__output_t *out = rsc->cluster->priv;
CRM_ASSERT(rsc->variant == pe_native);
if (!force && (chosen != NULL)) {
if ((chosen->weight < 0)
// Allow the graph to assume that guest node connections will come up
|| (!pcmk__node_available(chosen, true, false)
&& !pe__is_guest_node(chosen))) {
crm_debug("All nodes for resource %s are unavailable, unclean or "
"shutting down (%s can%s run resources, with weight %d)",
rsc->id, pe__node_name(chosen),
(pcmk__node_available(chosen, true, false)? "" : "not"),
chosen->weight);
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability");
chosen = NULL;
}
}
pcmk__unassign_resource(rsc);
pe__clear_resource_flags(rsc, pe_rsc_provisional);
if (chosen == NULL) {
crm_debug("Could not allocate a node for %s", rsc->id);
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate");
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
pe_action_t *op = (pe_action_t *) iter->data;
crm_debug("Updating %s for allocation failure", op->uuid);
if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) {
pe__clear_action_flags(op, pe_action_optional);
} else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) {
pe__clear_action_flags(op, pe_action_runnable);
//pe__set_resource_flags(rsc, pe_rsc_block);
} else {
// Cancel recurring actions, unless for stopped state
const char *interval_ms_s = NULL;
const char *target_rc_s = NULL;
char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
interval_ms_s = g_hash_table_lookup(op->meta,
XML_LRM_ATTR_INTERVAL_MS);
target_rc_s = g_hash_table_lookup(op->meta,
XML_ATTR_TE_TARGET_RC);
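// Recurring monitors that expect the resource to be running cannot succeed now, so mark them unrunnable; stopped-role monitors are left alone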
if ((interval_ms_s != NULL)
&& !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none)
&& !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
pe__clear_action_flags(op, pe_action_runnable);
}
free(rc_stopped);
}
}
return false;
}
crm_debug("Assigning %s to %s", rsc->id, pe__node_name(chosen));
rsc->allocated_to = pe__copy_node(chosen);
chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc,
rsc);
chosen->details->num_resources++;
chosen->count++;
pcmk__consume_node_capacity(chosen->details->utilization, rsc);
if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) {
out->message(out, "resource-util", rsc, chosen, __func__);
}
return true;
}
/*!
* \internal
* \brief Assign a specified resource (of any variant) to a node
*
* Assign a specified resource and its children (if any) to a specified node, if
* the node can run the resource (or unconditionally, if \p force is true). Mark
* the resources as no longer provisional. If the resources can't be assigned
* (or \p node is NULL), unassign any previous assignments, set next role to
* stopped, and update any existing actions scheduled for them.
*
* \param[in,out] rsc Resource to assign
* \param[in,out] node Node to assign \p rsc to
* \param[in] force If true, assign to \p node even if unavailable
*
* \return true if \p rsc could be assigned, otherwise false
*
* \note Assigning a resource to the NULL node using this function is different
* from calling pcmk__unassign_resource(), in that it will also update any
* actions created for the resource.
*/
bool
pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
{
bool changed = false;
if (rsc->children == NULL) {
if (rsc->allocated_to != NULL) {
changed = true;
}
pcmk__finalize_assignment(rsc, node, force);
} else {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
changed |= pcmk__assign_resource(child_rsc, node, force);
}
}
return changed;
}
/*!
* \internal
* \brief Remove any assignment of a specified resource to a node
*
* If a specified resource has been assigned to a node, remove that assignment
* and mark the resource as provisional again. This is not done recursively for
* children, so it should be called only for primitives.
*
* \param[in,out] rsc Resource to unassign
*/
void
pcmk__unassign_resource(pe_resource_t *rsc)
{
pe_node_t *old = rsc->allocated_to;
if (old == NULL) {
return;
}
crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old));
pe__set_resource_flags(rsc, pe_rsc_provisional);
rsc->allocated_to = NULL;
/* We're going to free the pe_node_t, but its details member is shared and
* will remain, so update that appropriately first.
*/
old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
rsc);
old->details->num_resources--;
pcmk__release_node_capacity(old->details->utilization, rsc);
free(old);
}
/*!
* \internal
* \brief Check whether a resource has reached its migration threshold on a node
*
* \param[in,out] rsc Resource to check
* \param[in] node Node to check
* \param[out] failed If threshold has been reached, this will be set to
* the resource that failed (possibly a parent of \p rsc)
*
* \return true if the migration threshold has been reached, false otherwise
*/
bool
pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
pe_resource_t **failed)
{
int fail_count, remaining_tries;
pe_resource_t *rsc_to_ban = rsc;
// Migration threshold of 0 means never force away
if (rsc->migration_threshold == 0) {
return false;
}
// If we're ignoring failures, also ignore the migration threshold
if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
return false;
}
// If there are no failures, there's no need to force away
fail_count = pe_get_failcount(node, rsc, NULL,
pe_fc_effective|pe_fc_fillers, NULL);
if (fail_count <= 0) {
return false;
}
// If failed resource is anonymous clone instance, we'll force clone away
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
rsc_to_ban = uber_parent(rsc);
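// (anonymous instances are interchangeable, so banning only this instance would not keep the clone off the node)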
}
// How many more times recovery will be tried on this node
remaining_tries = rsc->migration_threshold - fail_count;
if (remaining_tries <= 0) {
crm_warn("%s cannot run on %s due to reaching migration threshold "
"(clean up resource to allow again)"
CRM_XS " failures=%d migration-threshold=%d",
rsc_to_ban->id, pe__node_name(node), fail_count,
rsc->migration_threshold);
if (failed != NULL) {
*failed = rsc_to_ban;
}
return true;
}
crm_info("%s can fail %d more time%s on "
"%s before reaching migration threshold (%d)",
rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries),
pe__node_name(node), rsc->migration_threshold);
return false;
}
static void *
convert_const_pointer(const void *ptr)
{
/* Worst function ever */
return (void *)ptr;
}
/*!
* \internal
* \brief Get a node's weight
*
* \param[in] node Unweighted node to check (for node ID)
* \param[in] nodes List of weighted nodes to look for \p node in
*
* \return Node's weight, or -INFINITY if not found
*/
static int
get_node_weight(const pe_node_t *node, GHashTable *nodes)
{
pe_node_t *weighted_node = NULL;
if ((node != NULL) && (nodes != NULL)) {
weighted_node = g_hash_table_lookup(nodes, node->details->id);
}
return (weighted_node == NULL)? -INFINITY : weighted_node->weight;
}
/*!
* \internal
* \brief Compare two resources according to which should be allocated first
*
* \param[in] a First resource to compare
* \param[in] b Second resource to compare
* \param[in] data Sorted list of all nodes in cluster
*
* \return -1 if \p a should be allocated before \p b, 0 if they are equal,
* or +1 if \p a should be allocated after \p b
*/
static gint
cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
{
const pe_resource_t *resource1 = a;
const pe_resource_t *resource2 = b;
const GList *nodes = (const GList *) data;
int rc = 0;
int r1_weight = -INFINITY;
int r2_weight = -INFINITY;
pe_node_t *r1_node = NULL;
pe_node_t *r2_node = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
const char *reason = NULL;
// Resources with highest priority should be allocated first
reason = "priority";
r1_weight = resource1->priority;
r2_weight = resource2->priority;
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
// We need nodes to make any other useful comparisons
reason = "no node list";
if (nodes == NULL) {
goto done;
}
// Calculate and log node weights
pcmk__add_colocated_node_scores(convert_const_pointer(resource1),
resource1->id, &r1_nodes, NULL, 1,
pcmk__coloc_select_this_with);
pcmk__add_colocated_node_scores(convert_const_pointer(resource2),
resource2->id, &r2_nodes, NULL, 1,
pcmk__coloc_select_this_with);
pe__show_node_weights(true, NULL, resource1->id, r1_nodes,
resource1->cluster);
pe__show_node_weights(true, NULL, resource2->id, r2_nodes,
resource2->cluster);
// The resource with highest score on its current node goes first
reason = "current location";
if (resource1->running_on != NULL) {
r1_node = pe__current_node(resource1);
}
if (resource2->running_on != NULL) {
r2_node = pe__current_node(resource2);
}
r1_weight = get_node_weight(r1_node, r1_nodes);
r2_weight = get_node_weight(r2_node, r2_nodes);
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
// Otherwise a higher weight on any node will do
reason = "score";
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
const pe_node_t *node = (const pe_node_t *) iter->data;
r1_weight = get_node_weight(node, r1_nodes);
r2_weight = get_node_weight(node, r2_nodes);
if (r1_weight > r2_weight) {
rc = -1;
goto done;
}
if (r1_weight < r2_weight) {
rc = 1;
goto done;
}
}
done:
crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
resource1->id, r1_weight,
((r1_node == NULL)? "" : " on "),
((r1_node == NULL)? "" : r1_node->details->id),
((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
resource2->id, r2_weight,
((r2_node == NULL)? "" : " on "),
((r2_node == NULL)? "" : r2_node->details->id),
reason);
if (r1_nodes != NULL) {
g_hash_table_destroy(r1_nodes);
}
if (r2_nodes != NULL) {
g_hash_table_destroy(r2_nodes);
}
return rc;
}
/*!
* \internal
* \brief Sort resources in the order they should be allocated to nodes
*
* \param[in,out] data_set Cluster working set
*/
void
pcmk__sort_resources(pe_working_set_t *data_set)
{
GList *nodes = g_list_copy(data_set->nodes);
nodes = pcmk__sort_nodes(nodes, NULL);
data_set->resources = g_list_sort_with_data(data_set->resources,
cmp_resources, nodes);
g_list_free(nodes);
}
/*!
* \internal
* \brief Create a hash table with a single node in it
*
* \param[in] node Node to copy into new table
*
* \return Newly created hash table containing a copy of \p node
* \note The caller is responsible for freeing the result with
* g_hash_table_destroy().
*/
static GHashTable *
new_node_table(pe_node_t *node)
{
GHashTable *table = pcmk__strkey_table(NULL, free);
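// Keys are the shared node ID strings (no key destructor); the value destructor frees the node copy inserted below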
node = pe__copy_node(node);
g_hash_table_insert(table, (gpointer) node->details->id, node);
return table;
}
/*!
* \internal
* \brief Apply a resource's parent's colocation scores to a node table
*
* \param[in] rsc Resource whose colocations should be applied
* \param[in,out] nodes Node table to apply colocations to
*/
static void
apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
{
GList *iter = NULL;
pcmk__colocation_t *colocation = NULL;
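// Scale each colocation by score/INFINITY, so a mandatory colocation contributes with factor 1.0 and weaker ones proportionally less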
for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
colocation = (pcmk__colocation_t *) iter->data;
pcmk__add_colocated_node_scores(colocation->primary, rsc->id, nodes,
colocation->node_attribute,
colocation->score / (float) INFINITY,
pcmk__coloc_select_default);
}
for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
colocation = (pcmk__colocation_t *) iter->data;
if (!pcmk__colocation_has_influence(colocation, rsc)) {
continue;
}
pcmk__add_colocated_node_scores(colocation->dependent, rsc->id, nodes,
colocation->node_attribute,
colocation->score / (float) INFINITY,
pcmk__coloc_select_nonnegative);
}
}
/*!
* \internal
* \brief Compare clone or bundle instances based on colocation scores
*
* Determine the relative order in which two clone or bundle instances should be
* assigned to nodes, considering the scores of colocation constraints directly
* or indirectly involving them.
*
* \param[in] instance1 First instance to compare
* \param[in] instance2 Second instance to compare
*
* \return A negative number if \p instance1 should be assigned first,
* a positive number if \p instance2 should be assigned first,
* or 0 if assignment order doesn't matter
*/
static int
cmp_instance_by_colocation(const pe_resource_t *instance1,
const pe_resource_t *instance2)
{
int rc = 0;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
pe_node_t *current_node1 = pe__current_node(instance1);
pe_node_t *current_node2 = pe__current_node(instance2);
GHashTable *colocated_scores1 = NULL;
GHashTable *colocated_scores2 = NULL;
CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
&& (instance2 != NULL) && (instance2->parent != NULL)
&& (current_node1 != NULL) && (current_node2 != NULL));
// Create node tables initialized with each node
colocated_scores1 = new_node_table(current_node1);
colocated_scores2 = new_node_table(current_node2);
// Apply parental colocations
apply_parent_colocations(instance1, &colocated_scores1);
apply_parent_colocations(instance2, &colocated_scores2);
// Find original nodes again, with scores updated for colocations
node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
// Compare nodes by updated scores
if (node1->weight < node2->weight) {
crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
instance1->id, node1->weight, pe__node_name(node1),
instance2->id, node2->weight, pe__node_name(node2));
rc = 1;
} else if (node1->weight > node2->weight) {
crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
instance1->id, node1->weight, pe__node_name(node1),
instance2->id, node2->weight, pe__node_name(node2));
rc = -1;
}
g_hash_table_destroy(colocated_scores1);
g_hash_table_destroy(colocated_scores2);
return rc;
}
/*!
* \internal
* \brief Check whether a resource or any of its children are failed
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc or any of its children are failed, otherwise false
*/
static bool
did_fail(const pe_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
if (did_fail((const pe_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a node is allowed to run a resource
*
* \param[in] rsc Resource to check
* \param[in,out] node Node to check (will be set NULL if not allowed)
*
* \return true if *node is either NULL or allowed for \p rsc, otherwise false
*/
static bool
node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
{
if (*node != NULL) {
pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
(*node)->details->id);
if ((allowed == NULL) || (allowed->weight < 0)) {
pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
rsc->id, pe__node_name(*node));
*node = NULL;
return false;
}
}
return true;
}
/*!
* \internal
* \brief Compare two clone or bundle instances' instance numbers
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a's instance number is lower,
* a positive number if \p b's instance number is lower,
* or 0 if their instance numbers are the same
*/
gint
pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
{
const pe_resource_t *instance1 = (const pe_resource_t *) a;
const pe_resource_t *instance2 = (const pe_resource_t *) b;
char *div1 = NULL;
char *div2 = NULL;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
// Clone numbers are after a colon, bundle numbers after a dash
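// (for example, hypothetical IDs "myclone:2" and "mybundle-2" would both yield instance number 2)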
div1 = strrchr(instance1->id, ':');
if (div1 == NULL) {
div1 = strrchr(instance1->id, '-');
}
div2 = strrchr(instance2->id, ':');
if (div2 == NULL) {
div2 = strrchr(instance2->id, '-');
}
CRM_ASSERT((div1 != NULL) && (div2 != NULL));
return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
}
/*!
* \internal
* \brief Compare clone or bundle instances according to assignment order
*
* Compare two clone or bundle instances according to the order they should be
* assigned to nodes, preferring (in order):
*
* - Active instance that is less multiply active
* - Instance that is not active on a disallowed node
* - Instance with higher configured priority
* - Active instance whose current node can run resources
* - Active instance whose parent is allowed on current node
* - Active instance whose current node has fewer other instances
* - Active instance
* - Failed instance
* - Instance whose colocations result in higher score on current node
* - Instance with lower ID in lexicographic order
*
* \param[in] a First instance to compare
* \param[in] b Second instance to compare
*
* \return A negative number if \p a should be assigned first,
* a positive number if \p b should be assigned first,
* or 0 if assignment order doesn't matter
*/
gint
pcmk__cmp_instance(gconstpointer a, gconstpointer b)
{
int rc = 0;
pe_node_t *node1 = NULL;
pe_node_t *node2 = NULL;
unsigned int nnodes1 = 0;
unsigned int nnodes2 = 0;
bool can1 = true;
bool can2 = true;
const pe_resource_t *instance1 = (const pe_resource_t *) a;
const pe_resource_t *instance2 = (const pe_resource_t *) b;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
- node1 = pe__find_active_on(instance1, &nnodes1, NULL);
- node2 = pe__find_active_on(instance2, &nnodes2, NULL);
+ node1 = instance1->fns->active_node(instance1, &nnodes1, NULL);
+ node2 = instance2->fns->active_node(instance2, &nnodes2, NULL);
/* If both instances are running and at least one is multiply
* active, prefer instance that's running on fewer nodes.
*/
if ((nnodes1 > 0) && (nnodes2 > 0)) {
if (nnodes1 < nnodes2) {
crm_trace("Assign %s (active on %d) before %s (active on %d): "
"less multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return -1;
} else if (nnodes1 > nnodes2) {
crm_trace("Assign %s (active on %d) after %s (active on %d): "
"more multiply active",
instance1->id, nnodes1, instance2->id, nnodes2);
return 1;
}
}
/* An instance that is either inactive or active on an allowed node is
* preferred over an instance that is active on a no-longer-allowed node.
*/
can1 = node_is_allowed(instance1, &node1);
can2 = node_is_allowed(instance2, &node2);
if (can1 && !can2) {
crm_trace("Assign %s before %s: not active on a disallowed node",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: active on a disallowed node",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher configured priority
if (instance1->priority > instance2->priority) {
crm_trace("Assign %s before %s: priority (%d > %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return -1;
} else if (instance1->priority < instance2->priority) {
crm_trace("Assign %s after %s: priority (%d < %d)",
instance1->id, instance2->id,
instance1->priority, instance2->priority);
return 1;
}
// Prefer active instance
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: inactive",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node can run resources
can1 = pcmk__node_available(node1, false, false);
can2 = pcmk__node_available(node2, false, false);
if (can1 && !can2) {
crm_trace("Assign %s before %s: current node can run resources",
instance1->id, instance2->id);
return -1;
} else if (!can1 && can2) {
crm_trace("Assign %s after %s: current node can't run resources",
instance1->id, instance2->id);
return 1;
}
// Prefer instance whose parent is allowed to run on instance's current node
node1 = pcmk__top_allowed_node(instance1, node1);
node2 = pcmk__top_allowed_node(instance2, node2);
if ((node1 == NULL) && (node2 == NULL)) {
crm_trace("No assignment preference for %s vs. %s: "
"parent not allowed on either instance's current node",
instance1->id, instance2->id);
return 0;
} else if (node1 == NULL) {
crm_trace("Assign %s after %s: parent not allowed on current node",
instance1->id, instance2->id);
return 1;
} else if (node2 == NULL) {
crm_trace("Assign %s before %s: parent allowed on current node",
instance1->id, instance2->id);
return -1;
}
// Prefer instance whose current node is running fewer other instances
if (node1->count < node2->count) {
crm_trace("Assign %s before %s: fewer active instances on current node",
instance1->id, instance2->id);
return -1;
} else if (node1->count > node2->count) {
crm_trace("Assign %s after %s: more active instances on current node",
instance1->id, instance2->id);
return 1;
}
// Prefer failed instance
can1 = did_fail(instance1);
can2 = did_fail(instance2);
if (!can1 && can2) {
crm_trace("Assign %s before %s: failed", instance1->id, instance2->id);
return -1;
} else if (can1 && !can2) {
crm_trace("Assign %s after %s: not failed",
instance1->id, instance2->id);
return 1;
}
// Prefer instance with higher cumulative colocation score on current node
rc = cmp_instance_by_colocation(instance1, instance2);
if (rc != 0) {
return rc;
}
// Prefer instance with lower instance number
rc = pcmk__cmp_instance_number(instance1, instance2);
if (rc < 0) {
crm_trace("Assign %s before %s: instance number",
instance1->id, instance2->id);
} else if (rc > 0) {
crm_trace("Assign %s after %s: instance number",
instance1->id, instance2->id);
} else {
crm_trace("No assignment preference for %s vs. %s",
instance1->id, instance2->id);
}
return rc;
}
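/* A usage sketch, not part of this patch: the comparator is intended for
 * sorting a clone's or bundle's instance list into assignment order, for
 * example with g_list_sort(). The function and parameter names are
 * hypothetical.
 */
static GList *
example_order_instances_for_assignment(pe_resource_t *collective)
{
    return g_list_sort(collective->children, pcmk__cmp_instance);
}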
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index bdf3256ada..fb004a0415 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1156 +1,1157 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
+static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
+ unsigned int *count_clean);
+
resource_object_functions_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
native_parameter,
native_print,
native_active,
native_resource_state,
native_location,
native_free,
pe__count_common,
pe__native_is_filtered,
+ active_node,
},
{
group_unpack,
native_find_rsc,
native_parameter,
group_print,
group_active,
group_resource_state,
native_location,
group_free,
pe__count_common,
pe__group_is_filtered,
+ active_node,
},
{
clone_unpack,
native_find_rsc,
native_parameter,
clone_print,
clone_active,
clone_resource_state,
native_location,
clone_free,
pe__count_common,
pe__clone_is_filtered,
+ active_node,
},
{
pe__unpack_bundle,
native_find_rsc,
native_parameter,
pe__print_bundle,
pe__bundle_active,
pe__bundle_resource_state,
native_location,
pe__free_bundle,
pe__count_bundle,
pe__bundle_is_filtered,
+ active_node,
}
};
static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
return pe_native;
} else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
return pe_group;
} else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
return pe_clone;
} else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
return pe_clone;
} else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
return pe_container;
}
return pe_unknown;
}
static void
dup_attr(gpointer key, gpointer value, gpointer user_data)
{
add_hash_param(user_data, key, value);
}
static void
expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
pe_resource_t *p = rsc->parent;
if (p == NULL) {
return ;
}
    /* Walk up through all parent resources, gathering the "meta_attributes"
     * values fixed in each parent's original XML into the hash table. Values
     * from closer (lower) parents take precedence and are not overwritten.
     */
while(p != NULL) {
        /* Build the hash table used for comparison, including values pulled in via id-ref. */
pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
rule_data, parent_orig_meta, NULL, FALSE, data_set);
p = p->parent;
}
    /* If the parent resources set any "meta_attributes" in their original XML, process them. */
if (parent_orig_meta != NULL) {
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, parent_orig_meta);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
            /* Meta-attributes set in a parent resource's original XML are also
             * applied to the child resource; attributes already set on the
             * child are not updated.
             */
dup_attr(key, value, meta_hash);
}
}
if (parent_orig_meta != NULL) {
g_hash_table_destroy(parent_orig_meta);
}
return ;
}
void
get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
const char *prop_name = (const char *) a->name;
const char *prop_value = crm_element_value(rsc->xml, prop_name);
add_hash_param(meta_hash, prop_name, prop_value);
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
    /* Add "meta_attributes" explicitly set on parent resources to the child's
     * hash table; values already explicitly set on the child are not
     * overwritten.
     */
if (rsc->parent != NULL) {
expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
}
/* check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
    /* Finally, fall back to the parent's effective meta-attributes for
     * anything still unset. Values already set up to this point are not
     * overwritten.
     */
if (rsc->parent) {
g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
}
}
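/* Usage sketch (illustrative only, not part of this patch): after unpacking,
 * a resolved meta-attribute can be read straight from the table. The function
 * name is hypothetical, and XML_RSC_ATTR_TARGET_ROLE is assumed to be the
 * usual constant for the "target-role" meta-attribute.
 */
static char *
example_get_target_role(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    GHashTable *meta = pcmk__strkey_table(free, free);
    const char *role = NULL;
    char *copy = NULL;

    get_meta_attributes(meta, rsc, NULL, data_set);
    role = g_hash_table_lookup(meta, XML_RSC_ATTR_TARGET_ROLE);
    if (role != NULL) {
        copy = strdup(role);
    }
    g_hash_table_destroy(meta);
    return copy;
}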
void
get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
const pe_node_t *node, pe_working_set_t *data_set)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
}
}
static char *
template_op_key(xmlNode * op)
{
const char *name = crm_element_value(op, "name");
const char *role = crm_element_value(op, "role");
char *key = NULL;
if ((role == NULL)
|| pcmk__strcase_any_of(role, RSC_ROLE_STARTED_S, RSC_ROLE_UNPROMOTED_S,
RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
role = RSC_ROLE_UNKNOWN_S;
}
key = crm_strdup_printf("%s-%s", name, role);
return key;
}
static gboolean
unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
xmlNode *new_xml = NULL;
xmlNode *child_xml = NULL;
xmlNode *rsc_ops = NULL;
xmlNode *template_ops = NULL;
const char *template_ref = NULL;
const char *clone = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for template unpacking");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
        pe_err("'%s' object must have an id", crm_element_name(xml_obj));
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
}
template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
XML_ATTR_ID, template_ref);
if (template == NULL) {
pe_err("No template named '%s'", template_ref);
return FALSE;
}
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
crm_xml_replace(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
}
template_ops = find_xml_node(new_xml, "operations", FALSE);
for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
child_xml = pcmk__xe_next(child_xml)) {
xmlNode *new_child = NULL;
new_child = add_node_copy(new_xml, child_xml);
if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
rsc_ops = new_child;
}
}
if (template_ops && rsc_ops) {
xmlNode *op = NULL;
GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
g_hash_table_insert(rsc_ops_hash, key, op);
}
for (op = pcmk__xe_first_child(template_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
add_node_copy(rsc_ops, op);
}
free(key);
}
if (rsc_ops_hash) {
g_hash_table_destroy(rsc_ops_hash);
}
free_xml(template_ops);
}
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
/* Disable multi-level templates for now */
/*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return FALSE;
} */
return TRUE;
}
static gboolean
add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
{
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for processing resource list of template");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
        pe_err("'%s' object must have an id", crm_element_name(xml_obj));
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
return TRUE;
}
static bool
detect_promotable(pe_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
if (crm_is_true(promotable)) {
return TRUE;
}
// @COMPAT deprecated since 2.0.0
if (pcmk__str_eq(crm_element_name(rsc->xml), PCMK_XE_PROMOTABLE_LEGACY,
pcmk__str_casei)) {
/* @TODO in some future version, pe_warn_once() here,
         * then drop support in an even later version
*/
g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
strdup(XML_BOOLEAN_TRUE));
return TRUE;
}
return FALSE;
}
static void
free_params_table(gpointer data)
{
g_hash_table_destroy((GHashTable *) data);
}
/*!
* \brief Get a table of resource parameters
*
* \param[in,out] rsc Resource to query
* \param[in] node Node for evaluating rules (NULL for defaults)
* \param[in,out] data_set Cluster working set
*
* \return Hash table containing resource parameter names and values
* (or NULL if \p rsc or \p data_set is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
pe_working_set_t *data_set)
{
GHashTable *params_on_node = NULL;
/* A NULL node is used to request the resource's default parameters
* (not evaluated for node), but we always want something non-NULL
* as a hash table key.
*/
const char *node_name = "";
// Sanity check
if ((rsc == NULL) || (data_set == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
node_name = node->details->uname;
}
// Find the parameter table for given node
if (rsc->parameter_cache == NULL) {
rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
} else {
params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
}
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
get_rsc_attributes(params_on_node, rsc, node, data_set);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
return params_on_node;
}
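/* Usage sketch (illustrative only, not part of this patch): the returned
 * table is cached on the resource and must not be destroyed by the caller.
 * The function name and the "ip" parameter are hypothetical.
 */
static const char *
example_param_on_node(pe_resource_t *rsc, const pe_node_t *node,
                      pe_working_set_t *data_set)
{
    GHashTable *params = pe_rsc_params(rsc, node, data_set);

    return (params == NULL)? NULL : g_hash_table_lookup(params, "ip");
}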
/*!
* \internal
* \brief Unpack a resource's "requires" meta-attribute
*
* \param[in,out] rsc Resource being unpacked
* \param[in] value Value of "requires" meta-attribute
* \param[in] is_default Whether \p value was selected by default
*/
static void
unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
{
if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"is disabled", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else {
pe__set_resource_flags(rsc,
pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
value = PCMK__VALUE_QUORUM;
} else if ((rsc->variant == pe_native)
&& xml_contains_remote_node(rsc->xml)) {
value = PCMK__VALUE_QUORUM;
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
value = PCMK__VALUE_UNFENCING;
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
value = PCMK__VALUE_FENCING;
} else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
value = PCMK__VALUE_NOTHING;
} else {
value = PCMK__VALUE_QUORUM;
}
if (orig_value != NULL) {
pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
"to '%s' because '%s' is not valid",
rsc->id, value, orig_value);
}
unpack_requires(rsc, value, true);
return;
}
pe_rsc_trace(rsc, "\tRequired to start: %s%s", value,
(is_default? " (default)" : ""));
}
/*!
* \internal
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
* \c pe_resource_t object.
*
* \param[in] xml_obj XML node containing the resource's configuration
* \param[out] rsc Where to store the unpacked resource information
* \param[in] parent Resource's parent, if any
* \param[in,out] data_set Cluster working set
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
* the caller is responsible for freeing it using its variant-specific
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_resource_t *parent, pe_working_set_t *data_set)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
const char *value = NULL;
const char *id = NULL;
bool guest_node = false;
bool remote_node = false;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
CRM_CHECK(rsc != NULL, return EINVAL);
CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
*rsc = NULL;
return EINVAL);
rule_data.now = data_set->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
crm_element_name(xml_obj));
return pcmk_rc_unpack_error;
}
if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
return pcmk_rc_unpack_error;
}
*rsc = calloc(1, sizeof(pe_resource_t));
if (*rsc == NULL) {
crm_crit("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
(*rsc)->cluster = data_set;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
(*rsc)->xml = expanded_xml;
(*rsc)->orig_xml = xml_obj;
} else {
(*rsc)->xml = xml_obj;
(*rsc)->orig_xml = NULL;
}
/* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
(*rsc)->ops_xml = expand_idref(ops, data_set->input);
(*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
if ((*rsc)->variant == pe_unknown) {
pe_err("Ignoring resource '%s' of unknown type '%s'",
id, crm_element_name((*rsc)->xml));
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
(*rsc)->meta = pcmk__strkey_table(free, free);
(*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
(*rsc)->known_on = pcmk__strkey_table(NULL, free);
value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
if (value) {
(*rsc)->id = crm_strdup_printf("%s:%s", id, value);
add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
} else {
(*rsc)->id = strdup(id);
}
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
(*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
(*rsc)->flags = 0;
pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe__set_resource_flags(*rsc, pe_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
(*rsc)->role = RSC_ROLE_STOPPED;
(*rsc)->next_role = RSC_ROLE_UNKNOWN;
(*rsc)->recovery_type = recovery_stop_start;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
(*rsc)->priority = char2score(value);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
(*rsc)->is_remote_node = TRUE;
if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
guest_node = true;
} else {
remote_node = true;
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
} else if ((value == NULL) && remote_node) {
/* By default, we want remote nodes to be able
* to float around the cluster without having to stop all the
* resources within the remote-node before moving. Allowing
* migration support enables this feature. If this ever causes
* problems, migration support can be explicitly turned off with
* allow-migrate=false.
*/
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_managed);
} else {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_unique);
}
if (detect_promotable(*rsc)) {
pe__set_resource_flags(*rsc, pe_rsc_promotable);
}
} else {
pe__set_resource_flags(*rsc, pe_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
(*rsc)->id);
pe_warn_once(pe_wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
(*rsc)->restart_type = pe_restart_ignore;
pe_rsc_trace((*rsc), "%s dependency restart handling: ignore",
(*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
(*rsc)->recovery_type = recovery_stop_only;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
(*rsc)->recovery_type = recovery_block;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
(*rsc)->recovery_type = recovery_stop_unexpected;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
} else { // "stop_start"
if (!pcmk__str_eq(value, "stop_start",
pcmk__str_casei|pcmk__str_null_matches)) {
pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
", using default of \"stop_start\"", value);
}
(*rsc)->recovery_type = recovery_stop_start;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop/start", (*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->stickiness = char2score(value);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
/* @TODO We use 1 here to preserve previous behavior, but this
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
pe_warn_once(pe_wo_neg_threshold,
XML_RSC_ATTR_FAIL_STICKINESS
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
}
if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
pe__set_resource_flags(*rsc, pe_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
unpack_requires(*rsc, value, false);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
* reconnect_interval.
*
* @TODO Evaluate per node before using
*/
value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
if (value) {
/* reconnect delay works by setting failure_timeout and preventing the
* connection from starting until the failure is cleared. */
(*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
/* we want to override any default failure_timeout in use when remote
* reconnect_interval is in use. */
(*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
}
}
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
(*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
} else if (guest_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
}
pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
(*rsc)->utilization, NULL, FALSE, data_set);
if (expanded_xml) {
if (add_template_rsc(xml_obj, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
}
return pcmk_rc_ok;
}
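/* A usage sketch, not part of this patch: per the contract above, a
 * successful call hands ownership of *rsc to the caller, which must release
 * it through the variant-specific free() method. The function name is
 * hypothetical.
 */
static pe_resource_t *
example_unpack_one(xmlNode *xml_obj, pe_working_set_t *data_set)
{
    pe_resource_t *rsc = NULL;

    if (pe__unpack_resource(xml_obj, &rsc, NULL, data_set) != pcmk_rc_ok) {
        return NULL; // *rsc is guaranteed to be NULL on failure
    }
    // ... use rsc, then eventually release it with rsc->fns->free(rsc)
    return rsc;
}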
void
common_update_score(pe_resource_t * rsc, const char *id, int score)
{
pe_node_t *node = NULL;
node = pe_hash_table_lookup(rsc->allowed_nodes, id);
if (node != NULL) {
pe_rsc_trace(rsc, "Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score);
node->weight = pcmk__add_scores(node->weight, score);
}
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
common_update_score(child_rsc, id, score);
}
}
}
gboolean
is_parent(pe_resource_t *child, pe_resource_t *rsc)
{
pe_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
}
while (parent->parent != NULL) {
if (parent->parent == rsc) {
return TRUE;
}
parent = parent->parent;
}
return FALSE;
}
pe_resource_t *
uber_parent(pe_resource_t * rsc)
{
pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL && parent->parent->variant != pe_container) {
parent = parent->parent;
}
return parent;
}
/*!
* \internal
* \brief Get the topmost parent of a resource as a const pointer
*
* \param[in] rsc Resource to check
* \param[in] include_bundle If true, go all the way to bundle
*
* \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
const pe_resource_t *
pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
{
const pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL) {
if (!include_bundle && (parent->parent->variant == pe_container)) {
break;
}
parent = parent->parent;
}
return parent;
}
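/* Usage sketch (illustrative only, not part of this patch): for a bundled
 * resource, uber_parent() stops below the bundle, while passing
 * include_bundle=true here returns the bundle itself. The function name is
 * hypothetical.
 */
static bool
example_is_bundled(const pe_resource_t *rsc)
{
    const pe_resource_t *top = pe__const_top_resource(rsc, true);

    return (top != NULL) && (top->variant == pe_container);
}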
void
common_free(pe_resource_t * rsc)
{
if (rsc == NULL) {
return;
}
pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
g_list_free(rsc->rsc_cons);
g_list_free(rsc->rsc_cons_lhs);
g_list_free(rsc->rsc_tickets);
g_list_free(rsc->dangling_migrations);
if (rsc->parameter_cache != NULL) {
g_hash_table_destroy(rsc->parameter_cache);
}
if (rsc->meta != NULL) {
g_hash_table_destroy(rsc->meta);
}
if (rsc->utilization != NULL) {
g_hash_table_destroy(rsc->utilization);
}
if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
rsc->orig_xml = NULL;
/* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
} else if (rsc->orig_xml) {
free_xml(rsc->xml);
rsc->xml = NULL;
}
if (rsc->running_on) {
g_list_free(rsc->running_on);
rsc->running_on = NULL;
}
if (rsc->known_on) {
g_hash_table_destroy(rsc->known_on);
rsc->known_on = NULL;
}
if (rsc->actions) {
g_list_free(rsc->actions);
rsc->actions = NULL;
}
if (rsc->allowed_nodes) {
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = NULL;
}
g_list_free(rsc->fillers);
g_list_free(rsc->rsc_location);
pe_rsc_trace(rsc, "Resource freed");
free(rsc->id);
free(rsc->clone_name);
free(rsc->allocated_to);
free(rsc->variant_opaque);
free(rsc->pending_task);
free(rsc);
}
-/*!
- * \brief
- * \internal Find a node (and optionally count all) where resource is active
- *
- * \param[in] rsc Resource to check
- * \param[out] count_all If not NULL, will be set to count of active nodes
- * \param[out] count_clean If not NULL, will be set to count of clean nodes
- *
- * \return An active node (or NULL if resource is not active anywhere)
- *
- * \note The order of preference is: an active node that is the resource's
- * partial migration source; if the resource's "requires" is "quorum" or
- * "nothing", the first active node in the list that is clean and online;
- * the first active node in the list.
- */
-pe_node_t *
-pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all,
- unsigned int *count_clean)
+// Shared implementation of resource_object_functions_t:active_node()
+static pe_node_t *
+active_node(const pe_resource_t *rsc, unsigned int *count_all,
+ unsigned int *count_clean)
{
pe_node_t *active = NULL;
pe_node_t *node = NULL;
bool keep_looking = FALSE;
bool is_happy = FALSE;
if (count_all) {
*count_all = 0;
}
if (count_clean) {
*count_clean = 0;
}
if (rsc == NULL) {
return NULL;
}
for (GList *node_iter = rsc->running_on; node_iter != NULL;
node_iter = node_iter->next) {
node = node_iter->data;
keep_looking = FALSE;
is_happy = node->details->online && !node->details->unclean;
if (count_all) {
++*count_all;
}
if (count_clean && is_happy) {
++*count_clean;
}
if (count_all || count_clean) {
// If we're counting, we need to go through entire list
keep_looking = TRUE;
}
if (rsc->partial_migration_source != NULL) {
if (node->details == rsc->partial_migration_source->details) {
// This is the migration source
active = node;
} else {
keep_looking = TRUE;
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
if (is_happy && (!active || !active->details->online
|| active->details->unclean)) {
// This is the first clean node
active = node;
} else {
keep_looking = TRUE;
}
}
if (active == NULL) {
// This is first node in list
active = node;
}
if (keep_looking == FALSE) {
// Don't waste time iterating if we don't have to
break;
}
}
return active;
}
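/* A usage sketch, not part of this patch: with this change, callers reach the
 * shared implementation through the resource's method table, as
 * pcmk__cmp_instance() now does. The function name is hypothetical.
 */
static pe_node_t *
example_where_active(const pe_resource_t *rsc, unsigned int *count_all)
{
    return rsc->fns->active_node(rsc, count_all, NULL);
}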
/*!
 * \internal
 * \brief Find and count active nodes according to "requires"
*
* \param[in] rsc Resource to check
* \param[out] count If not NULL, will be set to count of active nodes
*
* \return An active node (or NULL if resource is not active anywhere)
*
- * \note This is a convenience wrapper for pe__find_active_on() where the count
- * of all active nodes or only clean active nodes is desired according to
- * the "requires" meta-attribute.
+ * \note This is a convenience wrapper for active_node() where the count of all
+ * active nodes or only clean active nodes is desired according to the
+ * "requires" meta-attribute.
*/
pe_node_t *
pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
{
- if (rsc && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
- return pe__find_active_on(rsc, NULL, count);
+ if (rsc == NULL) {
+ if (count != NULL) {
+ *count = 0;
+ }
+ return NULL;
+
+ } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ return rsc->fns->active_node(rsc, count, NULL);
+
+ } else {
+ return rsc->fns->active_node(rsc, NULL, count);
}
- return pe__find_active_on(rsc, count, NULL);
}
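/* Usage sketch (illustrative only, not part of this patch): a caller that
 * only needs the count relevant to the resource's "requires" setting can
 * ignore the returned node. The function name is hypothetical.
 */
static unsigned int
example_count_active_requires(const pe_resource_t *rsc)
{
    unsigned int count = 0;

    (void) pe__find_active_requires(rsc, &count);
    return count;
}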
void
pe__count_common(pe_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
((pe_resource_t *) item->data)->fns->count(item->data);
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role > RSC_ROLE_STOPPED)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
rsc->cluster->blocked_resources++;
}
}
}
/*!
* \internal
* \brief Update a resource's next role
*
* \param[in,out] rsc Resource to be updated
* \param[in] role Resource's new next role
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
rsc->id, role2text(rsc->next_role), role2text(role), why);
rsc->next_role = role;
}
}
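/* A usage sketch, not part of this patch: callers pass a short reason that
 * appears in the trace log when the role actually changes. The function name
 * and reason string are hypothetical.
 */
static void
example_stop_resource(pe_resource_t *rsc)
{
    pe__set_next_role(rsc, RSC_ROLE_STOPPED, "example: administrative stop");
}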
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 4d1c95299c..4a89c647d4 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,2173 +1,2173 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm_resource.h>
#include <crm/lrmd_internal.h>
#include <crm/common/cmdline_internal.h>
#include <crm/common/ipc_attrd_internal.h>
#include <crm/common/lists_internal.h>
#include <crm/common/output.h>
#include <pacemaker-internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/stonith-ng.h>
#include <crm/common/ipc_controld.h>
#include <crm/cib/internal.h>
#define SUMMARY "crm_resource - perform tasks related to Pacemaker cluster resources"
enum rsc_command {
cmd_none = 0, // No command option given (yet)
cmd_ban,
cmd_cleanup,
cmd_clear,
cmd_colocations,
cmd_cts,
cmd_delete,
cmd_delete_param,
cmd_digests,
cmd_execute_agent,
cmd_fail,
cmd_get_param,
cmd_get_property,
cmd_list_active_ops,
cmd_list_agents,
cmd_list_all_ops,
cmd_list_alternatives,
cmd_list_instances,
cmd_list_providers,
cmd_list_resources,
cmd_list_standards,
cmd_locate,
cmd_metadata,
cmd_move,
cmd_query_raw_xml,
cmd_query_xml,
cmd_refresh,
cmd_restart,
cmd_set_param,
cmd_set_property,
cmd_wait,
cmd_why,
};
struct {
enum rsc_command rsc_cmd; // crm_resource command to perform
// Infrastructure that given command needs to work
gboolean require_cib; // Whether command requires CIB IPC
int cib_options; // Options to use with CIB IPC calls
gboolean require_crmd; // Whether command requires controller IPC
gboolean require_dataset; // Whether command requires populated data set
gboolean require_resource; // Whether command requires resource specified
gboolean require_node; // Whether command requires node specified
int find_flags; // Flags to use when searching for resource
// Command-line option values
gchar *rsc_id; // Value of --resource
gchar *rsc_type; // Value of --resource-type
gboolean force; // --force was given
gboolean clear_expired; // --expired was given
gboolean recursive; // --recursive was given
gboolean promoted_role_only; // --promoted was given
gchar *host_uname; // Value of --node
gchar *interval_spec; // Value of --interval
gchar *move_lifetime; // Value of --lifetime
gchar *operation; // Value of --operation
const char *attr_set_type; // Instance, meta, or utilization attribute
gchar *prop_id; // --nvpair (attribute XML ID)
char *prop_name; // Attribute name
gchar *prop_set; // --set-name (attribute block XML ID)
gchar *prop_value; // --parameter-value (attribute value)
int timeout_ms; // Parsed from --timeout value
char *agent_spec; // Standard and/or provider and/or agent
gchar *xml_file; // Value of (deprecated) --xml-file
int check_level; // Optional value of --validate or --force-check
// Resource configuration specified via command-line arguments
gboolean cmdline_config; // Resource configuration was via arguments
char *v_agent; // Value of --agent
char *v_class; // Value of --class
char *v_provider; // Value of --provider
GHashTable *cmdline_params; // Resource parameters specified
// Positional command-line arguments
gchar **remainder; // Positional arguments as given
GHashTable *override_params; // Resource parameter values that override config
} options = {
.attr_set_type = XML_TAG_ATTR_SETS,
.check_level = -1,
.cib_options = cib_sync_call,
.require_cib = TRUE,
.require_dataset = TRUE,
.require_resource = TRUE,
};
#if 0
// @COMPAT @TODO enable this at next backward compatibility break
#define SET_COMMAND(cmd) do { \
if (options.rsc_cmd != cmd_none) { \
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_USAGE, \
"Only one command option may be specified"); \
return FALSE; \
} \
options.rsc_cmd = (cmd); \
} while (0)
#else
#define SET_COMMAND(cmd) do { \
if (options.rsc_cmd != cmd_none) { \
reset_options(); \
} \
options.rsc_cmd = (cmd); \
} while (0)
#endif
gboolean agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean list_agents_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_providers_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_standards_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean list_alternatives_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean metadata_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean option_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean validate_or_force_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean restart_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean digests_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error);
gboolean wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
static crm_exit_t exit_code = CRM_EX_OK;
static pcmk__output_t *out = NULL;
static pcmk__common_args_t *args = NULL;
// Things that should be cleaned up on exit
static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static cib_t *cib_conn = NULL;
static pcmk_ipc_api_t *controld_api = NULL;
static pe_working_set_t *data_set = NULL;
#define MESSAGE_TIMEOUT_S 60
#define INDENT " "
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
// Clean up and exit
static crm_exit_t
bye(crm_exit_t ec)
{
pcmk__output_and_clear_error(error, out);
if (out != NULL) {
out->finish(out, ec, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
if (cib_conn != NULL) {
cib_t *save_cib_conn = cib_conn;
cib_conn = NULL; // Ensure we can't free this twice
cib__clean_up_connection(&save_cib_conn);
}
if (controld_api != NULL) {
pcmk_ipc_api_t *save_controld_api = controld_api;
controld_api = NULL; // Ensure we can't free this twice
pcmk_free_ipc_api(save_controld_api);
}
if (mainloop != NULL) {
g_main_loop_unref(mainloop);
mainloop = NULL;
}
pe_free_working_set(data_set);
data_set = NULL;
crm_exit(ec);
return ec;
}
static void
quit_main_loop(crm_exit_t ec)
{
exit_code = ec;
if (mainloop != NULL) {
GMainLoop *mloop = mainloop;
mainloop = NULL; // Don't re-enter this block
pcmk_quit_main_loop(mloop, 10);
g_main_loop_unref(mloop);
}
}
static gboolean
resource_ipc_timeout(gpointer data)
{
// Start with newline because "Waiting for ..." message doesn't have one
if (error != NULL) {
g_clear_error(&error);
}
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_TIMEOUT,
_("Aborting because no messages received in %d seconds"), MESSAGE_TIMEOUT_S);
quit_main_loop(CRM_EX_TIMEOUT);
return FALSE;
}
static void
controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
crm_info("Connection to controller was terminated");
}
quit_main_loop(exit_code);
break;
case pcmk_ipc_event_reply:
if (status != CRM_EX_OK) {
out->err(out, "Error: bad reply from controller: %s",
crm_exit_str(status));
pcmk_disconnect_ipc(api);
quit_main_loop(status);
} else {
if ((pcmk_controld_api_replies_expected(api) == 0)
&& mainloop && g_main_loop_is_running(mainloop)) {
out->info(out, "... got reply (done)");
crm_debug("Got all the replies we expected");
pcmk_disconnect_ipc(api);
quit_main_loop(CRM_EX_OK);
} else {
out->info(out, "... got reply");
}
}
break;
default:
break;
}
}
static void
start_mainloop(pcmk_ipc_api_t *capi)
{
unsigned int count = pcmk_controld_api_replies_expected(capi);
if (count > 0) {
out->info(out, "Waiting for %u %s from the controller",
count, pcmk__plural_alt(count, "reply", "replies"));
exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects
mainloop = g_main_loop_new(NULL, FALSE);
g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL);
g_main_loop_run(mainloop);
}
}
static int
compare_id(gconstpointer a, gconstpointer b)
{
return strcmp((const char *)a, (const char *)b);
}
static GList *
build_constraint_list(xmlNode *root)
{
GList *retval = NULL;
xmlNode *cib_constraints = NULL;
xmlXPathObjectPtr xpathObj = NULL;
int ndx = 0;
cib_constraints = pcmk_find_cib_element(root, XML_CIB_TAG_CONSTRAINTS);
xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION);
for (ndx = 0; ndx < numXpathResults(xpathObj); ndx++) {
xmlNode *match = getXpathResult(xpathObj, ndx);
retval = g_list_insert_sorted(retval, (gpointer) ID(match), compare_id);
}
freeXpathObject(xpathObj);
return retval;
}
/* short option letters still available: eEJkKXyYZ */
static GOptionEntry query_entries[] = {
{ "list", 'L', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List all cluster resources with status",
NULL },
{ "list-raw", 'l', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List IDs of all instantiated resources (individual members\n"
INDENT "rather than groups etc.)",
NULL },
{ "list-cts", 'c', G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
NULL,
NULL },
{ "list-operations", 'O', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List active resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-all-operations", 'o', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, list_cb,
"List all resource operations, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ "list-standards", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
list_standards_cb,
"List supported standards",
NULL },
{ "list-ocf-providers", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
list_providers_cb,
"List all available OCF providers",
NULL },
{ "list-agents", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
list_agents_cb,
"List all agents available for the named standard and/or provider",
"STD:PROV" },
{ "list-ocf-alternatives", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
list_alternatives_cb,
"List all available providers for the named OCF agent",
"AGENT" },
{ "show-metadata", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK,
metadata_cb,
"Show the metadata for the named class:provider:agent",
"SPEC" },
{ "query-xml", 'q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show XML configuration of resource (after any template expansion)",
NULL },
{ "query-xml-raw", 'w', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show XML configuration of resource (before any template expansion)",
NULL },
{ "get-parameter", 'g', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
"Display named parameter for resource (use instance attribute\n"
INDENT "unless --meta or --utilization is specified)",
"PARAM" },
{ "get-property", 'G', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, get_param_prop_cb,
"Display named property of resource ('class', 'type', or 'provider') "
"(requires --resource)",
"PROPERTY" },
{ "locate", 'W', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Show node(s) currently running resource",
NULL },
{ "constraints", 'a', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Display the location and colocation constraints that apply to a\n"
INDENT "resource, and if --recursive is specified, to the resources\n"
INDENT "directly or indirectly involved in those colocations.\n"
INDENT "If the named resource is part of a group, or a clone or\n"
INDENT "bundle instance, constraints for the collective resource\n"
INDENT "will be shown unless --force is given.",
NULL },
{ "stack", 'A', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Equivalent to --constraints --recursive",
NULL },
{ "why", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, why_cb,
"Show why resources are not running, optionally filtered by\n"
INDENT "--resource and/or --node",
NULL },
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "validate", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"Validate resource configuration by calling agent's validate-all\n"
INDENT "action. The configuration may be specified either by giving an\n"
INDENT "existing resource name with -r, or by specifying --class,\n"
INDENT "--agent, and --provider arguments, along with any number of\n"
INDENT "--option arguments. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ "cleanup", 'C', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
"If resource has any past failures, clear its history and fail\n"
INDENT "count. Optionally filtered by --resource, --node, --operation\n"
INDENT "and --interval (otherwise all). --operation and --interval\n"
INDENT "apply to fail counts, but entire history is always clear, to\n"
INDENT "allow current state to be rechecked. If the named resource is\n"
INDENT "part of a group, or one numbered instance of a clone or bundled\n"
INDENT "resource, the clean-up applies to the whole collective resource\n"
INDENT "unless --force is given.",
NULL },
{ "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, cleanup_refresh_cb,
"Delete resource's history (including failures) so its current state\n"
INDENT "is rechecked. Optionally filtered by --resource and --node\n"
INDENT "(otherwise all). If the named resource is part of a group, or one\n"
INDENT "numbered instance of a clone or bundled resource, the refresh\n"
INDENT "applies to the whole collective resource unless --force is given.",
NULL },
{ "set-parameter", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
"Set named parameter for resource (requires -v). Use instance\n"
INDENT "attribute unless --meta or --utilization is specified.",
"PARAM" },
{ "delete-parameter", 'd', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, set_delete_param_cb,
"Delete named parameter for resource. Use instance attribute\n"
INDENT "unless --meta or --utilization is specified.",
"PARAM" },
{ "set-property", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, set_prop_cb,
"Set named property of resource ('class', 'type', or 'provider') "
"(requires -r, -t, -v)",
"PROPERTY" },
{ NULL }
};
static GOptionEntry location_entries[] = {
{ "move", 'M', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Create a constraint to move resource. If --node is specified,\n"
INDENT "the constraint will be to move to that node, otherwise it\n"
INDENT "will be to ban the current node. Unless --force is specified\n"
INDENT "this will return an error if the resource is already running\n"
INDENT "on the specified node. If --force is specified, this will\n"
INDENT "always ban the current node.\n"
INDENT "Optional: --lifetime, --promoted. NOTE: This may prevent the\n"
INDENT "resource from running on its previous location until the\n"
INDENT "implicit constraint expires or is removed with --clear.",
NULL },
{ "ban", 'B', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Create a constraint to keep resource off a node.\n"
INDENT "Optional: --node, --lifetime, --promoted.\n"
INDENT "NOTE: This will prevent the resource from running on the\n"
INDENT "affected node until the implicit constraint expires or is\n"
INDENT "removed with --clear. If --node is not specified, it defaults\n"
INDENT "to the node currently running the resource for primitives\n"
INDENT "and groups, or the promoted instance of promotable clones with\n"
INDENT "promoted-max=1 (all other situations result in an error as\n"
INDENT "there is no sane default).",
NULL },
{ "clear", 'U', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, flag_cb,
"Remove all constraints created by the --ban and/or --move\n"
INDENT "commands. Requires: --resource. Optional: --node, --promoted,\n"
INDENT "--expired. If --node is not specified, all constraints created\n"
INDENT "by --ban and --move will be removed for the named resource. If\n"
INDENT "--node and --force are specified, any constraint created by\n"
INDENT "--move will be cleared, even if it is not for the specified\n"
INDENT "node. If --expired is specified, only those constraints whose\n"
INDENT "lifetimes have expired will be removed.",
NULL },
{ "expired", 'e', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, expired_cb,
"Modifies the --clear argument to remove constraints with\n"
INDENT "expired lifetimes.",
NULL },
{ "lifetime", 'u', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.move_lifetime,
"Lifespan (as ISO 8601 duration) of created constraints (with\n"
INDENT "-B, -M) see https://en.wikipedia.org/wiki/ISO_8601#Durations)",
"TIMESPEC" },
{ "promoted", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Limit scope of command to promoted role (with -B, -M, -U). For\n"
INDENT "-B and -M, previously promoted instances may remain\n"
INDENT "active in the unpromoted role.",
NULL },
// Deprecated since 2.1.0
{ "master", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
&options.promoted_role_only,
"Deprecated: Use --promoted instead", NULL },
{ NULL }
};
static GOptionEntry advanced_entries[] = {
{ "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb,
"(Advanced) Delete a resource from the CIB. Required: -t",
NULL },
{ "fail", 'F', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, fail_cb,
"(Advanced) Tell the cluster this resource has failed",
NULL },
{ "restart", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, restart_cb,
"(Advanced) Tell the cluster to restart this resource and\n"
INDENT "anything that depends on it",
NULL },
{ "wait", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, wait_cb,
"(Advanced) Wait until the cluster settles into a stable state",
NULL },
{ "digests", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, digests_cb,
"(Advanced) Show parameter hashes that Pacemaker uses to detect\n"
INDENT "configuration changes (only accurate if there is resource\n"
INDENT "history on the specified node). Required: --resource, --node.\n"
INDENT "Optional: any NAME=VALUE parameters will be used to override\n"
INDENT "the configuration (to see what the hash would be with those\n"
INDENT "changes).",
NULL },
{ "force-demote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and demote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-stop", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and stop a resource on the local node",
NULL },
{ "force-start", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and start a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-promote", 0, G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and promote a resource on the local\n"
INDENT "node. Unless --force is specified, this will refuse to do so if\n"
INDENT "the cluster believes the resource is a clone instance already\n"
INDENT "running on the local node.",
NULL },
{ "force-check", 0, G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK,
validate_or_force_cb,
"(Advanced) Bypass the cluster and check the state of a resource on\n"
INDENT "the local node. An optional LEVEL argument can be given\n"
INDENT "to control the level of checking performed.",
"LEVEL" },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "node", 'N', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.host_uname,
"Node name",
"NAME" },
{ "recursive", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.recursive,
"Follow colocation chains when using --set-parameter or --constraints",
NULL },
{ "resource-type", 't', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_type,
"Resource XML element (primitive, group, etc.) (with -D)",
"ELEMENT" },
{ "parameter-value", 'v', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_value,
"Value to use with -p",
"PARAM" },
{ "meta", 'm', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource meta-attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
"Use resource utilization attribute instead of instance attribute\n"
INDENT "(with -p, -g, -d)",
NULL },
{ "operation", 'n', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.operation,
"Operation to clear instead of all (with -C -r)",
"OPERATION" },
{ "interval", 'I', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.interval_spec,
"Interval of operation to clear (default 0) (with -C -r -n)",
"N" },
{ "class", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, class_cb,
"The standard the resource agent conforms to (for example, ocf).\n"
INDENT "Use with --agent, --provider, --option, and --validate.",
"CLASS" },
{ "agent", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
"The agent to use (for example, IPaddr). Use with --class,\n"
INDENT "--provider, --option, and --validate.",
"AGENT" },
{ "provider", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, agent_provider_cb,
"The vendor that supplies the resource agent (for example,\n"
INDENT "heartbeat). Use with --class, --agent, --option, and --validate.",
"PROVIDER" },
{ "option", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, option_cb,
"Specify a device configuration parameter as NAME=VALUE (may be\n"
INDENT "specified multiple times). Use with --validate and without the\n"
INDENT "-r option.",
"PARAM" },
{ "set-name", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_set,
"(Advanced) XML ID of attributes element to use (with -p, -d)",
"ID" },
{ "nvpair", 'i', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.prop_id,
"(Advanced) XML ID of nvpair element to use (with -p, -d)",
"ID" },
{ "timeout", 'T', G_OPTION_FLAG_NONE, G_OPTION_ARG_CALLBACK, timeout_cb,
"(Advanced) Abort if command does not finish in this time (with\n"
INDENT "--restart, --wait, --force-*)",
"N" },
{ "force", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.force,
"If making CIB changes, do so regardless of quorum. See help for\n"
INDENT "individual commands for additional behavior.",
NULL },
{ "xml-file", 'x', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_FILENAME, &options.xml_file,
NULL,
"FILE" },
{ "host-uname", 'H', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.host_uname,
NULL,
"HOST" },
{ NULL }
};
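/* Reset the per-command requirements to their defaults: a CIB connection,
 * a data set, and a resource ID are needed, while a controller connection,
 * a node name, and resource search flags are not.
 */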
static void
reset_options(void) {
options.require_crmd = FALSE;
options.require_node = FALSE;
options.require_cib = TRUE;
options.require_dataset = TRUE;
options.require_resource = TRUE;
options.find_flags = 0;
}
gboolean
agent_provider_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.cmdline_config = TRUE;
options.require_resource = FALSE;
if (pcmk__str_eq(option_name, "--provider", pcmk__str_casei)) {
pcmk__str_update(&options.v_provider, optarg);
} else {
pcmk__str_update(&options.v_agent, optarg);
}
return TRUE;
}
gboolean
attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-m", "--meta", NULL)) {
options.attr_set_type = XML_TAG_META_SETS;
} else if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
options.attr_set_type = XML_TAG_UTILIZATION;
}
return TRUE;
}
gboolean
class_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
pcmk__str_update(&options.v_class, optarg);
options.cmdline_config = TRUE;
options.require_resource = FALSE;
return TRUE;
}
gboolean
cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-C", "--cleanup", NULL)) {
SET_COMMAND(cmd_cleanup);
} else {
SET_COMMAND(cmd_refresh);
}
options.require_resource = FALSE;
if (getenv("CIB_file") == NULL) {
options.require_crmd = TRUE;
}
options.find_flags = pe_find_renamed|pe_find_anon;
return TRUE;
}
gboolean
delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_delete);
options.require_dataset = FALSE;
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
expired_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.clear_expired = TRUE;
options.require_resource = FALSE;
return TRUE;
}
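/* Helper for the agent listing and metadata callbacks: those commands talk
 * to the executor directly, so they need neither the CIB, a data set, nor a
 * resource ID; just remember the agent specification for later.
 */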
static void
get_agent_spec(const gchar *optarg)
{
options.require_cib = FALSE;
options.require_dataset = FALSE;
options.require_resource = FALSE;
pcmk__str_update(&options.agent_spec, optarg);
}
gboolean
list_agents_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_agents);
get_agent_spec(optarg);
return TRUE;
}
gboolean
list_providers_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_providers);
get_agent_spec(optarg);
return TRUE;
}
gboolean
list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_list_standards);
options.require_cib = FALSE;
options.require_dataset = FALSE;
options.require_resource = FALSE;
return TRUE;
}
gboolean
list_alternatives_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error)
{
SET_COMMAND(cmd_list_alternatives);
get_agent_spec(optarg);
return TRUE;
}
gboolean
metadata_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_metadata);
get_agent_spec(optarg);
return TRUE;
}
gboolean
option_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
char *name = NULL;
char *value = NULL;
if (pcmk__scan_nvpair(optarg, &name, &value) != 2) {
return FALSE;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
g_hash_table_replace(options.cmdline_params, name, value);
return TRUE;
}
gboolean
fail_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_fail);
options.require_crmd = TRUE;
options.require_node = TRUE;
return TRUE;
}
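/* Shared callback for options that simply select a command: the option name
 * determines which command runs and which resource search flags apply
 * (--stack additionally implies --recursive).
 */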
gboolean
flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
SET_COMMAND(cmd_clear);
options.find_flags = pe_find_renamed|pe_find_anon;
} else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
SET_COMMAND(cmd_ban);
options.find_flags = pe_find_renamed|pe_find_anon;
} else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
SET_COMMAND(cmd_move);
options.find_flags = pe_find_renamed|pe_find_anon;
} else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
SET_COMMAND(cmd_query_xml);
options.find_flags = pe_find_renamed|pe_find_any;
} else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
SET_COMMAND(cmd_query_raw_xml);
options.find_flags = pe_find_renamed|pe_find_any;
} else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
SET_COMMAND(cmd_locate);
options.find_flags = pe_find_renamed|pe_find_anon;
} else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) {
SET_COMMAND(cmd_colocations);
options.find_flags = pe_find_renamed|pe_find_anon;
} else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
SET_COMMAND(cmd_colocations);
options.find_flags = pe_find_renamed|pe_find_anon;
options.recursive = TRUE;
}
return TRUE;
}
gboolean
get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-g", "--get-parameter", NULL)) {
SET_COMMAND(cmd_get_param);
} else {
SET_COMMAND(cmd_get_property);
}
pcmk__str_update(&options.prop_name, optarg);
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
list_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-c", "--list-cts", NULL)) {
SET_COMMAND(cmd_cts);
} else if (pcmk__str_any_of(option_name, "-L", "--list", NULL)) {
SET_COMMAND(cmd_list_resources);
} else if (pcmk__str_any_of(option_name, "-l", "--list-raw", NULL)) {
SET_COMMAND(cmd_list_instances);
} else if (pcmk__str_any_of(option_name, "-O", "--list-operations", NULL)) {
SET_COMMAND(cmd_list_active_ops);
} else {
SET_COMMAND(cmd_list_all_ops);
}
options.require_resource = FALSE;
return TRUE;
}
gboolean
set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-p", "--set-parameter", NULL)) {
SET_COMMAND(cmd_set_param);
} else {
SET_COMMAND(cmd_delete_param);
}
pcmk__str_update(&options.prop_name, optarg);
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_set_property);
options.require_dataset = FALSE;
pcmk__str_update(&options.prop_name, optarg);
options.find_flags = pe_find_renamed|pe_find_any;
return TRUE;
}
gboolean
timeout_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.timeout_ms = crm_get_msec(optarg);
return TRUE;
}
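/* Callback for --validate and the --force-* commands. The agent action to
 * run is the option name with the leading "--" stripped, and an optional
 * argument, when given, sets the check level.
 */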
gboolean
validate_or_force_cb(const gchar *option_name, const gchar *optarg,
gpointer data, GError **error)
{
SET_COMMAND(cmd_execute_agent);
if (options.operation) {
g_free(options.operation);
}
options.operation = g_strdup(option_name + 2); // skip "--"
options.find_flags = pe_find_renamed|pe_find_anon;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
if (optarg != NULL) {
if (pcmk__scan_min_int(optarg, &options.check_level, 0) != pcmk_rc_ok) {
g_set_error(error, G_OPTION_ERROR, CRM_EX_INVALID_PARAM,
_("Invalid check level setting: %s"), optarg);
return FALSE;
}
}
return TRUE;
}
gboolean
restart_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_restart);
options.find_flags = pe_find_renamed|pe_find_anon;
return TRUE;
}
gboolean
digests_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_digests);
options.find_flags = pe_find_renamed|pe_find_anon;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
options.require_node = TRUE;
options.require_dataset = TRUE;
return TRUE;
}
gboolean
wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_wait);
options.require_resource = FALSE;
options.require_dataset = FALSE;
return TRUE;
}
gboolean
why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_why);
options.require_resource = FALSE;
options.find_flags = pe_find_renamed|pe_find_anon;
return TRUE;
}
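/* Handle --ban or --move when no node was specified: this only works when
 * the resource is active on exactly one node (or, for promotable clones,
 * promoted on exactly one node); otherwise ask the user to name a node.
 */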
static int
ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime)
{
int rc = pcmk_rc_ok;
pe_node_t *current = NULL;
unsigned int nactive = 0;
CRM_CHECK(rsc != NULL, return EINVAL);
current = pe__find_active_requires(rsc, &nactive);
if (nactive == 1) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
cib_conn, options.cib_options, options.promoted_role_only);
} else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
int count = 0;
GList *iter = NULL;
current = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
pe_resource_t *child = (pe_resource_t *)iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
if (child_role == RSC_ROLE_PROMOTED) {
count++;
current = pe__current_node(child);
}
}
if(count == 1 && current) {
rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
cib_conn, options.cib_options, options.promoted_role_only);
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations (promoted in %d).\n"
"To prevent '%s' from running on a specific location, "
"specify a node."
"To prevent '%s' from being promoted at a specific "
"location, specify a node and the --promoted option."),
options.rsc_id, nactive, count, options.rsc_id, options.rsc_id);
}
} else {
rc = EINVAL;
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Resource '%s' not moved: active in %d locations.\n"
"To prevent '%s' from running on a specific location, "
"specify a node."),
options.rsc_id, nactive, options.rsc_id);
}
return rc;
}
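/* Clean up the failure history of a single resource (its uber-parent unless
 * --force was given), report anything that might still keep it stopped, and
 * wait for the controller to process the request.
 */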
static void
cleanup(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Erasing failures of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, options.operation,
options.interval_spec, TRUE, data_set, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
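/* Remove constraints created by --ban/--move: all of them, only those for a
 * given node, or only expired ones with --expired. Unless output is quiet,
 * compare the constraint list before and after so the removals can be shown.
 */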
static int
clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
{
GList *before = NULL;
GList *after = NULL;
GList *remaining = NULL;
GList *ele = NULL;
pe_node_t *dest = NULL;
int rc = pcmk_rc_ok;
if (!out->is_quiet(out)) {
before = build_constraint_list(data_set->input);
}
if (options.clear_expired) {
rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options,
options.rsc_id, options.host_uname,
options.promoted_role_only);
} else if (options.host_uname) {
dest = pe_find_node(data_set->nodes, options.host_uname);
if (dest == NULL) {
rc = pcmk_rc_node_unknown;
if (!out->is_quiet(out)) {
g_list_free(before);
}
return rc;
}
rc = cli_resource_clear(options.rsc_id, dest->details->uname, NULL,
cib_conn, options.cib_options, TRUE, options.force);
} else {
rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes,
cib_conn, options.cib_options, TRUE, options.force);
}
if (!out->is_quiet(out)) {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not get modified CIB: %s\n"), pcmk_strerror(rc));
g_list_free(before);
free_xml(*cib_xml_copy);
*cib_xml_copy = NULL;
return rc;
}
data_set->input = *cib_xml_copy;
cluster_status(data_set);
after = build_constraint_list(data_set->input);
remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
for (ele = remaining; ele != NULL; ele = ele->next) {
out->info(out, "Removing constraint: %s", (char *) ele->data);
}
g_list_free(before);
g_list_free(after);
g_list_free(remaining);
}
return rc;
}
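// Delete the resource's configuration element from the CIB (-t gives the element type)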
static int
delete(void)
{
int rc = pcmk_rc_ok;
xmlNode *msg_data = NULL;
if (options.rsc_type == NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("You need to specify a resource type with -t"));
return rc;
}
msg_data = create_xml_node(NULL, options.rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
options.cib_options);
rc = pcmk_legacy2rc(rc);
free_xml(msg_data);
return rc;
}
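// List agents available for the given spec ("standard" or "standard:provider")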
static int
list_agents(pcmk__output_t *out, const char *agent_spec)
{
int rc = pcmk_rc_ok;
char *provider = strchr(agent_spec, ':');
lrmd_t *lrmd_conn = NULL;
lrmd_list_t *list = NULL;
rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
if (rc != pcmk_rc_ok) {
goto error;
}
if (provider) {
*provider++ = 0;
}
rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
if (rc > 0) {
rc = out->message(out, "agents-list", list, agent_spec, provider);
} else {
rc = pcmk_rc_error;
}
error:
if (rc != pcmk_rc_ok) {
if (provider == NULL) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No agents found for standard '%s'"), agent_spec);
} else {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No agents found for standard '%s' and provider '%s'"),
agent_spec, provider);
}
}
lrmd_api_delete(lrmd_conn);
return rc;
}
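/* Despite the name, this handles the standards, OCF providers, and OCF
 * alternatives listings, all of which are retrieved from the executor.
 */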
static int
list_providers(pcmk__output_t *out, const char *agent_spec)
{
int rc;
const char *text = NULL;
lrmd_t *lrmd_conn = NULL;
lrmd_list_t *list = NULL;
rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
if (rc != pcmk_rc_ok) {
goto error;
}
switch (options.rsc_cmd) {
case cmd_list_alternatives:
rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
if (rc > 0) {
rc = out->message(out, "alternatives-list", list, agent_spec);
} else {
rc = pcmk_rc_error;
}
text = "OCF providers";
break;
case cmd_list_standards:
rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
if (rc > 0) {
rc = out->message(out, "standards-list", list);
} else {
rc = pcmk_rc_error;
}
text = "standards";
break;
case cmd_list_providers:
rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
if (rc > 0) {
rc = out->message(out, "providers-list", list, agent_spec);
} else {
rc = pcmk_rc_error;
}
text = "OCF providers";
break;
default:
g_set_error(&error, PCMK__RC_ERROR, pcmk_rc_error, "Bug");
lrmd_api_delete(lrmd_conn);
return pcmk_rc_error;
}
error:
if (rc != pcmk_rc_ok) {
if (agent_spec != NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No %s found for %s"), text, agent_spec);
} else {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No %s found"), text);
}
}
lrmd_api_delete(lrmd_conn);
return rc;
}
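/* Build the scheduler working set from the file given with --xml-file if any,
 * otherwise from a query of the live CIB, then unpack it with cluster_status().
 */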
static int
populate_working_set(xmlNodePtr *cib_xml_copy)
{
int rc = pcmk_rc_ok;
if (options.xml_file != NULL) {
*cib_xml_copy = filename2xml(options.xml_file);
if (*cib_xml_copy == NULL) {
rc = pcmk_rc_cib_corrupt;
}
} else {
rc = cib_conn->cmds->query(cib_conn, NULL, cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
}
if (rc == pcmk_rc_ok) {
data_set = pe_new_working_set();
if (data_set == NULL) {
rc = ENOMEM;
} else {
pe__set_working_set_flags(data_set,
pe_flag_no_counts|pe_flag_no_compat);
data_set->priv = out;
rc = update_working_set_xml(data_set, cib_xml_copy);
}
}
if (rc != pcmk_rc_ok) {
free_xml(*cib_xml_copy);
*cib_xml_copy = NULL;
return rc;
}
cluster_status(data_set);
return pcmk_rc_ok;
}
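/* Re-probe resource state cluster-wide or on one node. For a Pacemaker Remote
 * node, the request is routed via the cluster node hosting its connection.
 * With CIB_file set there is no controller to contact, so only report what
 * would have been done.
 */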
static int
refresh(pcmk__output_t *out)
{
int rc = pcmk_rc_ok;
const char *router_node = options.host_uname;
int attr_options = pcmk__node_attr_none;
if (options.host_uname) {
pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname);
if (pe__is_guest_or_remote_node(node)) {
node = pe__current_node(node->details->remote_rsc);
if (node == NULL) {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("No cluster connection to Pacemaker Remote node %s detected"),
options.host_uname);
return rc;
}
router_node = node->details->uname;
attr_options |= pcmk__node_attr_remote;
}
}
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
options.host_uname? options.host_uname : "all nodes");
rc = pcmk_rc_ok;
return rc;
}
crm_debug("Re-checking the state of all resources on %s", options.host_uname?options.host_uname:"all nodes");
rc = pcmk__attrd_api_clear_failures(NULL, options.host_uname, NULL,
NULL, NULL, NULL, attr_options);
if (pcmk_controld_api_reprobe(controld_api, options.host_uname,
router_node) == pcmk_rc_ok) {
start_mainloop(controld_api);
}
return rc;
}
static void
refresh_resource(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
{
int rc = pcmk_rc_ok;
if (options.force == FALSE) {
rsc = uber_parent(rsc);
}
crm_debug("Re-checking the state of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0,
FALSE, data_set, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
cli_resource_check(out, rsc, node);
}
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
}
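/* Add or change an attribute directly on the resource's XML element (rather
 * than an nvpair); requires -t for the element type and -v for the value.
 */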
static int
set_property(void)
{
int rc = pcmk_rc_ok;
xmlNode *msg_data = NULL;
if (pcmk__str_empty(options.rsc_type)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Must specify -t with resource type"));
rc = ENXIO;
return rc;
} else if (pcmk__str_empty(options.prop_value)) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("Must supply -v with new value"));
rc = ENXIO;
return rc;
}
CRM_LOG_ASSERT(options.prop_name != NULL);
msg_data = create_xml_node(NULL, options.rsc_type);
crm_xml_add(msg_data, XML_ATTR_ID, options.rsc_id);
crm_xml_add(msg_data, options.prop_name, options.prop_value);
rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data,
options.cib_options);
rc = pcmk_legacy2rc(rc);
free_xml(msg_data);
return rc;
}
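/* Parse the agent specification into standard, provider, and type, then
 * fetch the agent's metadata from the executor.
 */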
static int
show_metadata(pcmk__output_t *out, const char *agent_spec)
{
int rc = pcmk_rc_ok;
char *standard = NULL;
char *provider = NULL;
char *type = NULL;
char *metadata = NULL;
lrmd_t *lrmd_conn = NULL;
rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Could not create executor connection"));
lrmd_api_delete(lrmd_conn);
return rc;
}
rc = crm_parse_agent_spec(agent_spec, &standard, &provider, &type);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
provider, type,
&metadata, 0);
rc = pcmk_legacy2rc(rc);
if (metadata) {
out->output_xml(out, "metadata", metadata);
free(metadata);
} else {
/* We were given a validly formatted spec, but it doesn't necessarily
* match up with anything that exists. Use ENXIO as the return code
* here because that maps to an exit code of CRM_EX_NOSUCH, which
* probably is the most common reason to get here.
*/
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("Metadata query for %s failed: %s"),
agent_spec, pcmk_rc_str(rc));
}
} else {
rc = ENXIO;
g_set_error(&error, PCMK__RC_ERROR, rc,
_("'%s' is not a valid agent specification"), agent_spec);
}
lrmd_api_delete(lrmd_conn);
return rc;
}
static void
validate_cmdline_config(void)
{
// Cannot use both --resource and command-line resource configuration
if (options.rsc_id != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--resource cannot be used with --class, --agent, and --provider"));
// Not all commands support command-line resource configuration
} else if (options.rsc_cmd != cmd_execute_agent) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("--class, --agent, and --provider can only be used with "
"--validate and --force-*"));
// Not all of --class, --agent, and --provider need to be given. Not all
// classes support the concept of a provider. Check that what we were given
// is valid.
} else if (pcmk__str_eq(options.v_class, "stonith", pcmk__str_none)) {
if (options.v_provider != NULL) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("stonith does not support providers"));
} else if (stonith_agent_exists(options.v_agent, 0) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s is not a known stonith agent"), options.v_agent ? options.v_agent : "");
}
} else if (resources_agent_exists(options.v_class, options.v_provider, options.v_agent) == FALSE) {
g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_USAGE,
_("%s:%s:%s is not a known resource"),
options.v_class ? options.v_class : "",
options.v_provider ? options.v_provider : "",
options.v_agent ? options.v_agent : "");
}
if (error != NULL) {
return;
}
if (options.cmdline_params == NULL) {
options.cmdline_params = pcmk__strkey_table(free, free);
}
options.require_resource = FALSE;
options.require_dataset = FALSE;
options.require_cib = FALSE;
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'Q', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &(args->quiet),
"Be less descriptive in output.",
NULL },
{ "resource", 'r', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.rsc_id,
"Resource ID",
"ID" },
{ G_OPTION_REMAINING, 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING_ARRAY, &options.remainder,
NULL,
NULL },
{ NULL }
};
const char *description = "Examples:\n\n"
"List the available OCF agents:\n\n"
"\t# crm_resource --list-agents ocf\n\n"
"List the available OCF agents from the linux-ha project:\n\n"
"\t# crm_resource --list-agents ocf:heartbeat\n\n"
"Move 'myResource' to a specific node:\n\n"
"\t# crm_resource --resource myResource --move --node altNode\n\n"
"Allow (but not force) 'myResource' to move back to its original "
"location:\n\n"
"\t# crm_resource --resource myResource --clear\n\n"
"Stop 'myResource' (and anything that depends on it):\n\n"
"\t# crm_resource --resource myResource --set-parameter target-role "
"--meta --parameter-value Stopped\n\n"
"Tell the cluster not to manage 'myResource' (the cluster will not "
"attempt to start or stop the\n"
"resource under any circumstances; useful when performing maintenance "
"tasks on a resource):\n\n"
"\t# crm_resource --resource myResource --set-parameter is-managed "
"--meta --parameter-value false\n\n"
"Erase the operation history of 'myResource' on 'aNode' (the cluster "
"will 'forget' the existing\n"
"resource state, including any errors, and attempt to recover the"
"resource; useful when a resource\n"
"had failed permanently and has been repaired by an administrator):\n\n"
"\t# crm_resource --resource myResource --cleanup --node aNode\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
g_option_context_set_description(context, description);
/* Add the -Q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
*/
pcmk__add_main_args(context, extra_prog_entries);
pcmk__add_arg_group(context, "queries", "Queries:",
"Show query help", query_entries);
pcmk__add_arg_group(context, "commands", "Commands:",
"Show command help", command_entries);
pcmk__add_arg_group(context, "locations", "Locations:",
"Show location help", location_entries);
pcmk__add_arg_group(context, "advanced", "Advanced:",
"Show advanced option help", advanced_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
return context;
}
int
main(int argc, char **argv)
{
xmlNode *cib_xml_copy = NULL;
pe_resource_t *rsc = NULL;
pe_node_t *node = NULL;
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
gchar **processed_args = NULL;
GOptionContext *context = NULL;
/*
* Parse command line arguments
*/
args = pcmk__new_common_args(SUMMARY);
processed_args = pcmk__cmdline_preproc(argv, "GHINSTdginpstuvx");
context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_resource", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("Error creating output format %s: %s"),
args->output_ty, pcmk_rc_str(rc));
goto done;
}
pe__register_messages(out);
crm_resource_register_messages(out);
lrmd__register_messages(out);
pcmk__register_lib_messages(out);
out->quiet = args->quiet;
crm_log_args(argc, argv);
/*
* Validate option combinations
*/
// If the user didn't explicitly specify a command, list resources
if (options.rsc_cmd == cmd_none) {
options.rsc_cmd = cmd_list_resources;
options.require_resource = FALSE;
}
// --expired without --clear/-U doesn't make sense
if (options.clear_expired && (options.rsc_cmd != cmd_clear)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, _("--expired requires --clear or -U"));
goto done;
}
if ((options.remainder != NULL) && (options.override_params != NULL)) {
// Commands that use positional arguments will create override_params
for (gchar **s = options.remainder; *s; s++) {
char *name = calloc(1, strlen(*s) + 1);
char *value = calloc(1, strlen(*s) + 1);
int rc = sscanf(*s, "%[^=]=%s", name, value);
if (rc == 2) {
g_hash_table_replace(options.override_params, name, value);
} else {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error parsing '%s' as a name=value pair"),
*s);
free(value);
free(name);
goto done;
}
}
} else if (options.remainder != NULL) {
gchar **strv = NULL;
gchar *msg = NULL;
int i = 1;
int len = 0;
for (gchar **s = options.remainder; *s; s++) {
len++;
}
CRM_ASSERT(len > 0);
/* Add 1 for the strv[0] string below, and add another 1 for the NULL
* at the end of the array so g_strjoinv knows when to stop.
*/
strv = calloc(len+2, sizeof(char *));
strv[0] = strdup("non-option ARGV-elements:\n");
for (gchar **s = options.remainder; *s; s++) {
strv[i] = crm_strdup_printf("[%d of %d] %s\n", i, len, *s);
i++;
}
strv[i] = NULL;
exit_code = CRM_EX_USAGE;
msg = g_strjoinv("", strv);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
g_free(msg);
/* Don't try to free the last element, which is just NULL. */
for(i = 0; i < len+1; i++) {
free(strv[i]);
}
free(strv);
goto done;
}
if (pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
/* Kind of a hack to display XML lists using a real tag instead of <list>. This just
* saves us from having to write custom messages to build the lists around all these things.
*/
switch (options.rsc_cmd) {
case cmd_execute_agent:
case cmd_list_resources:
case cmd_query_xml:
case cmd_query_raw_xml:
case cmd_list_active_ops:
case cmd_list_all_ops:
case cmd_colocations:
pcmk__force_args(context, &error, "%s --xml-simple-list --xml-substitute", g_get_prgname());
break;
default:
pcmk__force_args(context, &error, "%s --xml-substitute", g_get_prgname());
break;
}
} else if (pcmk__str_eq(args->output_ty, "text", pcmk__str_null_matches)) {
if ((options.rsc_cmd == cmd_colocations) ||
options.rsc_cmd == cmd_list_resources) {
pcmk__force_args(context, &error, "%s --text-fancy", g_get_prgname());
}
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.cmdline_config) {
/* A resource configuration was given on the command line. Sanity-check
* the values and set error if they don't make sense.
*/
validate_cmdline_config();
if (error != NULL) {
exit_code = CRM_EX_USAGE;
goto done;
}
} else if (options.cmdline_params != NULL) {
// @COMPAT @TODO error out here when we can break backward compatibility
g_hash_table_destroy(options.cmdline_params);
options.cmdline_params = NULL;
}
if (options.require_resource && (options.rsc_id == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a resource id with -r"));
goto done;
}
if (options.require_node && (options.host_uname == NULL)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Must supply a node name with -N"));
goto done;
}
/*
* Set up necessary connections
*/
if (options.force) {
crm_debug("Forcing...");
cib__set_call_options(options.cib_options, crm_system_name,
cib_quorum_override);
}
if (options.find_flags && options.rsc_id) {
options.require_dataset = TRUE;
}
// Establish a connection to the CIB if needed
if (options.require_cib) {
cib_conn = cib_new();
if ((cib_conn == NULL) || (cib_conn->cmds == NULL)) {
exit_code = CRM_EX_DISCONNECT;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not create CIB connection"));
goto done;
}
rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Could not connect to the CIB: %s"), pcmk_rc_str(rc));
goto done;
}
}
/* Populate working set from XML file if specified or CIB query otherwise */
if (options.require_dataset) {
rc = populate_working_set(&cib_xml_copy);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
goto done;
}
}
// If command requires that resource exist if specified, find it
if (options.find_flags && options.rsc_id) {
rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
options.find_flags);
if (rsc == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Resource '%s' not found"), options.rsc_id);
goto done;
}
/* The --ban, --clear, --move, and --restart commands do not work with
* instances of clone resources.
*/
if (strchr(options.rsc_id, ':') != NULL && pe_rsc_is_clone(rsc->parent) &&
(options.rsc_cmd == cmd_ban || options.rsc_cmd == cmd_clear ||
options.rsc_cmd == cmd_move || options.rsc_cmd == cmd_restart)) {
exit_code = CRM_EX_INVALID_PARAM;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Cannot operate on clone resource instance '%s'"), options.rsc_id);
goto done;
}
}
// If user supplied a node name, check whether it exists
if ((options.host_uname != NULL) && (data_set != NULL)) {
node = pe_find_node(data_set->nodes, options.host_uname);
if (node == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Node '%s' not found"), options.host_uname);
goto done;
}
}
// Establish a connection to the controller if needed
if (options.require_crmd) {
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to the controller: %s"), pcmk_rc_str(rc));
goto done;
}
pcmk_register_ipc_callback(controld_api, controller_event_callback,
NULL);
rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error connecting to the controller: %s"), pcmk_rc_str(rc));
goto done;
}
}
/*
* Handle requested command
*/
switch (options.rsc_cmd) {
case cmd_list_resources: {
GList *all = NULL;
all = g_list_prepend(all, (gpointer) "*");
rc = out->message(out, "resource-list", data_set,
pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending,
true, all, all, false);
g_list_free(all);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
}
case cmd_list_instances:
rc = out->message(out, "resource-names-list", data_set->resources);
if (rc != pcmk_rc_ok) {
rc = ENXIO;
}
break;
case cmd_list_standards:
case cmd_list_providers:
case cmd_list_alternatives:
rc = list_providers(out, options.agent_spec);
break;
case cmd_list_agents:
rc = list_agents(out, options.agent_spec);
break;
case cmd_metadata:
rc = show_metadata(out, options.agent_spec);
break;
case cmd_restart:
/* We don't pass data_set because rsc needs to stay valid for the
* entire lifetime of cli_resource_restart(), but it will reset and
* update the working set multiple times, so it needs to use its own
* copy.
*/
rc = cli_resource_restart(out, rsc, node, options.move_lifetime,
options.timeout_ms, cib_conn,
options.cib_options, options.promoted_role_only,
options.force);
break;
case cmd_wait:
rc = wait_till_stable(out, options.timeout_ms, cib_conn);
break;
case cmd_execute_agent:
if (options.cmdline_config) {
exit_code = cli_resource_execute_from_params(out, NULL,
options.v_class, options.v_provider, options.v_agent,
options.operation, options.cmdline_params,
options.override_params, options.timeout_ms,
args->verbosity, options.force, options.check_level);
} else {
exit_code = cli_resource_execute(rsc, options.rsc_id,
options.operation, options.override_params,
options.timeout_ms, cib_conn, data_set,
args->verbosity, options.force, options.check_level);
}
goto done;
case cmd_digests:
node = pe_find_node(data_set->nodes, options.host_uname);
if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = pcmk__resource_digests(out, rsc, node,
options.override_params);
}
break;
case cmd_colocations:
rc = out->message(out, "locations-and-colocations", rsc, data_set,
options.recursive, (bool) options.force);
break;
case cmd_cts:
rc = pcmk_rc_ok;
g_list_foreach(data_set->resources, (GFunc) cli_resource_print_cts, out);
cli_resource_print_cts_constraints(data_set);
break;
case cmd_fail:
rc = cli_resource_fail(controld_api, options.host_uname,
options.rsc_id, data_set);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
break;
case cmd_list_active_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, TRUE,
data_set);
break;
case cmd_list_all_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, FALSE,
data_set);
break;
case cmd_locate: {
GList *nodes = cli_resource_search(rsc, options.rsc_id, data_set);
rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
g_list_free_full(nodes, free);
break;
}
case cmd_query_xml:
rc = cli_resource_print(rsc, data_set, true);
break;
case cmd_query_raw_xml:
rc = cli_resource_print(rsc, data_set, false);
break;
case cmd_why:
if ((options.host_uname != NULL) && (node == NULL)) {
rc = pcmk_rc_node_unknown;
} else {
rc = out->message(out, "resource-reasons-list",
data_set->resources, rsc, node);
}
break;
case cmd_clear:
rc = clear_constraints(out, &cib_xml_copy);
break;
case cmd_move:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else {
rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
options.move_lifetime, cib_conn,
options.cib_options, data_set,
options.promoted_role_only,
options.force);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_ban:
if (options.host_uname == NULL) {
rc = ban_or_move(out, rsc, options.move_lifetime);
} else if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
rc = cli_resource_ban(out, options.rsc_id, node->details->uname,
options.move_lifetime, NULL, cib_conn,
options.cib_options,
options.promoted_role_only);
}
if (rc == EINVAL) {
exit_code = CRM_EX_USAGE;
goto done;
}
break;
case cmd_get_property:
rc = out->message(out, "property-list", rsc, options.prop_name);
if (rc == pcmk_rc_no_output) {
rc = ENXIO;
}
break;
case cmd_set_property:
rc = set_property();
break;
case cmd_get_param: {
unsigned int count = 0;
GHashTable *params = NULL;
- pe_node_t *current = pe__find_active_on(rsc, &count, NULL);
+ pe_node_t *current = rsc->fns->active_node(rsc, &count, NULL);
bool free_params = true;
if (count > 1) {
out->err(out, "%s is active on more than one node,"
" returning the default value for %s", rsc->id,
pcmk__s(options.prop_name, "unspecified property"));
current = NULL;
}
crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
params = pe_rsc_params(rsc, current, data_set);
free_params = false;
} else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
params = pcmk__strkey_table(free, free);
get_meta_attributes(params, rsc, current, data_set);
} else {
params = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params,
NULL, FALSE, data_set);
}
rc = out->message(out, "attribute-list", rsc, options.prop_name, params);
if (free_params) {
g_hash_table_destroy(params);
}
break;
}
case cmd_set_param:
if (pcmk__str_empty(options.prop_value)) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("You need to supply a value with the -v option"));
goto done;
}
/* coverity[var_deref_model] False positive */
rc = cli_resource_update_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name,
options.prop_value,
options.recursive, cib_conn,
options.cib_options, data_set,
options.force);
break;
case cmd_delete_param:
/* coverity[var_deref_model] False positive */
rc = cli_resource_delete_attribute(rsc, options.rsc_id,
options.prop_set,
options.attr_set_type,
options.prop_id,
options.prop_name, cib_conn,
options.cib_options, data_set,
options.force);
break;
case cmd_cleanup:
if (rsc == NULL) {
rc = cli_cleanup_all(controld_api, options.host_uname,
options.operation, options.interval_spec,
data_set);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
} else {
cleanup(out, rsc, node);
}
break;
case cmd_refresh:
if (rsc == NULL) {
rc = refresh(out);
} else {
refresh_resource(out, rsc, node);
}
break;
case cmd_delete:
rc = delete();
break;
default:
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Unimplemented command: %d"), (int) options.rsc_cmd);
goto done;
}
/* Convert rc into an exit code. */
if (rc != pcmk_rc_ok && rc != pcmk_rc_no_output) {
if (rc == pcmk_rc_no_quorum) {
g_prefix_error(&error, "To ignore quorum, use the force option.\n");
}
exit_code = pcmk_rc2exitc(rc);
}
/*
* Clean up and exit
*/
done:
/* When we get here, exit_code has been set one of two ways - either at one of
* the spots where there's a "goto done" (which itself could have happened either
* directly or by calling pcmk_rc2exitc), or just up above after any of the break
* statements.
*
* Thus, we can use just exit_code here to decide what to do.
*/
if (exit_code != CRM_EX_OK && exit_code != CRM_EX_USAGE) {
if (error != NULL) {
char *msg = crm_strdup_printf("%s\nError performing operation: %s",
error->message, crm_exit_str(exit_code));
g_clear_error(&error);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "%s", msg);
free(msg);
} else {
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
_("Error performing operation: %s"), crm_exit_str(exit_code));
}
}
g_free(options.host_uname);
g_free(options.interval_spec);
g_free(options.move_lifetime);
g_free(options.operation);
g_free(options.prop_id);
free(options.prop_name);
g_free(options.prop_set);
g_free(options.prop_value);
g_free(options.rsc_id);
g_free(options.rsc_type);
free(options.agent_spec);
free(options.v_agent);
free(options.v_class);
free(options.v_provider);
g_free(options.xml_file);
g_strfreev(options.remainder);
if (options.override_params != NULL) {
g_hash_table_destroy(options.override_params);
}
/* options.cmdline_params does not need to be destroyed here. See the
* comments in cli_resource_execute_from_params.
*/
g_strfreev(processed_args);
g_option_context_free(context);
return bye(exit_code);
}
