diff --git a/include/crm/common/scheduler_internal.h b/include/crm/common/scheduler_internal.h
index 63b3052098..45ceaf113c 100644
--- a/include/crm/common/scheduler_internal.h
+++ b/include/crm/common/scheduler_internal.h
@@ -1,289 +1,298 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
#define PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
#include <crm/common/action_relation_internal.h>
#include <crm/common/actions_internal.h>
#include <crm/common/attrs_internal.h>
#include <crm/common/bundles_internal.h>
#include <crm/common/clone_internal.h>
#include <crm/common/digest_internal.h>
#include <crm/common/failcounts_internal.h>
#include <crm/common/group_internal.h>
#include <crm/common/history_internal.h>
#include <crm/common/location_internal.h>
#include <crm/common/nodes_internal.h>
#include <crm/common/primitive_internal.h>
#include <crm/common/remote_internal.h>
#include <crm/common/resources_internal.h>
#include <crm/common/roles_internal.h>
#include <crm/common/rules_internal.h>
#include <crm/common/tickets_internal.h>
#ifdef __cplusplus
extern "C" {
#endif
enum pcmk__check_parameters {
/* Clear fail count if parameters changed for un-expired start or monitor
* last_failure.
*/
pcmk__check_last_failure,
/* Clear fail count if parameters changed for start, monitor, promote, or
* migrate_from actions for active resources.
*/
pcmk__check_active,
};
// Scheduling options and conditions
enum pcmk__scheduler_flags {
// No scheduler flags set (compare with equality rather than bit set)
pcmk__sched_none = 0ULL,
/* These flags are dynamically determined conditions */
// Whether partition has quorum (via \c PCMK_XA_HAVE_QUORUM attribute)
//! \deprecated Call pcmk_has_quorum() to check quorum instead
pcmk__sched_quorate = (1ULL << 0),
// Whether cluster is symmetric (via symmetric-cluster property)
pcmk__sched_symmetric_cluster = (1ULL << 1),
// Whether scheduling encountered a non-configuration error
pcmk__sched_processing_error = (1ULL << 2),
// Whether cluster is in maintenance mode (via maintenance-mode property)
pcmk__sched_in_maintenance = (1ULL << 3),
// Whether fencing is enabled (via stonith-enabled property)
pcmk__sched_fencing_enabled = (1ULL << 4),
// Whether cluster has a fencing resource (via CIB resources)
/*! \deprecated To indicate the cluster has a fencing resource, add either a
* fencing resource configuration or the have-watchdog cluster option to the
* input CIB
*/
pcmk__sched_have_fencing = (1ULL << 5),
// Whether any resource provides or requires unfencing (via CIB resources)
pcmk__sched_enable_unfencing = (1ULL << 6),
// Whether concurrent fencing is allowed (via concurrent-fencing property)
pcmk__sched_concurrent_fencing = (1ULL << 7),
/*
* Whether resources removed from the configuration should be stopped (via
* stop-orphan-resources property)
*/
pcmk__sched_stop_removed_resources = (1ULL << 8),
/*
* Whether recurring actions removed from the configuration should be
* cancelled (via stop-orphan-actions property)
*/
pcmk__sched_cancel_removed_actions = (1ULL << 9),
// Whether to stop all resources (via stop-all-resources property)
pcmk__sched_stop_all = (1ULL << 10),
// Whether scheduler processing encountered a warning
pcmk__sched_processing_warning = (1ULL << 11),
/*
* Whether start failure should be treated as if
* \c PCMK_META_MIGRATION_THRESHOLD is 1 (via
* \c PCMK_OPT_START_FAILURE_IS_FATAL property)
*/
pcmk__sched_start_failure_fatal = (1ULL << 12),
// Whether unseen nodes should be fenced (via startup-fencing property)
pcmk__sched_startup_fencing = (1ULL << 14),
/*
* Whether resources should be left stopped when their node shuts down
* cleanly (via shutdown-lock property)
*/
pcmk__sched_shutdown_lock = (1ULL << 15),
/*
* Whether resources' current state should be probed (when unknown) before
* scheduling any other actions (via the enable-startup-probes property)
*/
pcmk__sched_probe_resources = (1ULL << 16),
// Whether the CIB status section has been parsed yet
pcmk__sched_have_status = (1ULL << 17),
// Whether the cluster includes any Pacemaker Remote nodes (via CIB)
pcmk__sched_have_remote_nodes = (1ULL << 18),
/* The remaining flags are scheduling options that must be set explicitly */
/*
* Whether to skip unpacking the CIB status section and stop the scheduling
* sequence after applying node-specific location criteria (skipping
* assignment, ordering, actions, etc.).
*/
pcmk__sched_location_only = (1ULL << 20),
// Whether sensitive resource attributes have been masked
pcmk__sched_sanitized = (1ULL << 21),
// Skip counting of total, disabled, and blocked resource instances
pcmk__sched_no_counts = (1ULL << 23),
// Whether node scores should be output instead of logged
pcmk__sched_output_scores = (1ULL << 25),
// Whether to show node and resource utilization (in log or output)
pcmk__sched_show_utilization = (1ULL << 26),
/*
* Whether to stop the scheduling sequence after unpacking the CIB,
* calculating cluster status, and applying node health (skipping
* applying node-specific location criteria, assignment, etc.)
*/
pcmk__sched_validate_only = (1ULL << 27),
};
// Implementation of pcmk__scheduler_private_t
struct pcmk__scheduler_private {
// Be careful about when each piece of information is available and final
char *local_node_name; // Name of node running scheduler (if known)
crm_time_t *now; // Time to use when evaluating rules
pcmk__output_t *out; // Output object for displaying messages
GHashTable *options; // Cluster options
const char *fence_action; // Default fencing action
guint fence_timeout_ms; // Default fencing action timeout (in ms)
guint priority_fencing_ms; // Priority-based fencing delay (in ms)
guint shutdown_lock_ms; // How long to lock resources (in ms)
guint node_pending_ms; // Pending join times out after this (in ms)
const char *placement_strategy; // Value of placement-strategy property
xmlNode *rsc_defaults; // Configured resource defaults
xmlNode *op_defaults; // Configured operation defaults
GList *resources; // Resources in cluster
GHashTable *templates; // Key = template ID, value = resource list
GHashTable *tags; // Key = tag ID, value = element list
GList *actions; // All scheduled actions
GHashTable *singletons; // Scheduled non-resource actions
int next_action_id; // Counter used as ID for actions
xmlNode *failed; // History entries of failed actions
GList *param_check; // History entries that need to be checked
GList *stop_needed; // Containers that need stop actions
GList *location_constraints; // Location constraints
GList *colocation_constraints; // Colocation constraints
GList *ordering_constraints; // Ordering constraints
GHashTable *ticket_constraints; // Key = ticket ID, value = pcmk__ticket_t
int next_ordering_id; // Counter used as ID for orderings
int ninstances; // Total number of resource instances
int blocked_resources; // Number of blocked resources in cluster
int disabled_resources; // Number of disabled resources in cluster
time_t recheck_by; // Hint to controller when to reschedule
xmlNode *graph; // Transition graph
int synapse_count; // Number of transition graph synapses
};
// Group of enum pcmk__warnings flags for warnings we want to log once
extern uint32_t pcmk__warnings;
/*!
* \internal
* \brief Log a resource-tagged message at info severity
*
* \param[in] rsc Tag message with this resource's ID
* \param[in] fmt... printf(3)-style format and arguments
*/
#define pcmk__rsc_info(rsc, fmt, args...) \
crm_log_tag(LOG_INFO, ((rsc) == NULL)? "<NULL>" : (rsc)->id, (fmt), ##args)
/*!
* \internal
* \brief Log a resource-tagged message at debug severity
*
* \param[in] rsc Tag message with this resource's ID
* \param[in] fmt... printf(3)-style format and arguments
*/
#define pcmk__rsc_debug(rsc, fmt, args...) \
crm_log_tag(LOG_DEBUG, ((rsc) == NULL)? "<NULL>" : (rsc)->id, (fmt), ##args)
/*!
* \internal
* \brief Log a resource-tagged message at trace severity
*
* \param[in] rsc Tag message with this resource's ID
* \param[in] fmt... printf(3)-style format and arguments
*/
#define pcmk__rsc_trace(rsc, fmt, args...) \
crm_log_tag(LOG_TRACE, ((rsc) == NULL)? "<NULL>" : (rsc)->id, (fmt), ##args)
/*!
* \internal
* \brief Log an error and remember that current scheduler input has errors
*
* \param[in,out] scheduler Scheduler data
* \param[in] fmt... printf(3)-style format and arguments
*/
#define pcmk__sched_err(scheduler, fmt...) do { \
pcmk__set_scheduler_flags((scheduler), \
pcmk__sched_processing_error); \
crm_err(fmt); \
} while (0)
/*!
* \internal
* \brief Log a warning and remember that current scheduler input has warnings
*
* \param[in,out] scheduler Scheduler data
* \param[in] fmt... printf(3)-style format and arguments
*/
#define pcmk__sched_warn(scheduler, fmt...) do { \
pcmk__set_scheduler_flags((scheduler), \
pcmk__sched_processing_warning); \
crm_warn(fmt); \
} while (0)
/*!
* \internal
* \brief Set scheduler flags
*
* \param[in,out] scheduler Scheduler data
* \param[in] flags_to_set Group of enum pcmk__scheduler_flags to set
*/
#define pcmk__set_scheduler_flags(scheduler, flags_to_set) do { \
(scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", crm_system_name, \
(scheduler)->flags, (flags_to_set), #flags_to_set); \
} while (0)
/*!
* \internal
* \brief Clear scheduler flags
*
* \param[in,out] scheduler Scheduler data
* \param[in] flags_to_clear Group of enum pcmk__scheduler_flags to clear
*/
#define pcmk__clear_scheduler_flags(scheduler, flags_to_clear) do { \
(scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", crm_system_name, \
(scheduler)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
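/* Illustrative usage of the flag and logging helpers above (not part of
 * this patch):
 *
 *   pcmk__set_scheduler_flags(scheduler, pcmk__sched_quorate);
 *   if (pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) {
 *       pcmk__rsc_trace(rsc, "Scheduling %s with quorum", rsc->id);
 *   }
 */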
void pcmk__set_scheduler_defaults(pcmk_scheduler_t *scheduler);
time_t pcmk__scheduler_epoch_time(pcmk_scheduler_t *scheduler);
+void pcmk__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
+ pcmk_node_t *node, enum pcmk__check_parameters,
+ pcmk_scheduler_t *scheduler);
+void pcmk__foreach_param_check(pcmk_scheduler_t *scheduler,
+ void (*cb)(pcmk_resource_t*, pcmk_node_t*,
+ const xmlNode*,
+ enum pcmk__check_parameters));
+void pcmk__free_param_checks(pcmk_scheduler_t *scheduler);
+
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
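
A minimal sketch of a callback matching the pcmk__foreach_param_check() signature moved into this header; the name check_params_cb and its body are illustrative assumptions, not part of this patch:

// Hypothetical callback, invoked once per deferred parameter check
static void
check_params_cb(pcmk_resource_t *rsc, pcmk_node_t *node,
                const xmlNode *rsc_history, enum pcmk__check_parameters check)
{
    switch (check) {
        case pcmk__check_last_failure:
            // Clear fail count if parameters changed for un-expired
            // start or monitor last_failure
            break;
        case pcmk__check_active:
            // Clear fail count if parameters changed for actions on
            // active resources
            break;
    }
}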
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 0a6230c0a9..eaeece1f53 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,412 +1,404 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_PENGINE_INTERNAL__H
#define PCMK__CRM_PENGINE_INTERNAL__H
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <crm/common/xml.h>
#include <crm/pengine/status.h>
#include <crm/pengine/remote_internal.h>
#include <crm/common/internal.h>
#include <crm/common/options_internal.h>
#include <crm/common/output_internal.h>
#include <crm/common/scheduler_internal.h>
#ifdef __cplusplus
extern "C" {
#endif
const char *pe__resource_description(const pcmk_resource_t *rsc,
uint32_t show_opts);
bool pe__clone_is_ordered(const pcmk_resource_t *clone);
int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag);
bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags);
bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags);
pcmk_resource_t *pe__last_group_member(const pcmk_resource_t *group);
const pcmk_resource_t *pe__const_top_resource(const pcmk_resource_t *rsc,
bool include_bundle);
int pe__clone_max(const pcmk_resource_t *clone);
int pe__clone_node_max(const pcmk_resource_t *clone);
int pe__clone_promoted_max(const pcmk_resource_t *clone);
int pe__clone_promoted_node_max(const pcmk_resource_t *clone);
void pe__create_clone_notifications(pcmk_resource_t *clone);
void pe__free_clone_notification_data(pcmk_resource_t *clone);
void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone,
pcmk_action_t *start,
pcmk_action_t *started,
pcmk_action_t *stop,
pcmk_action_t *stopped);
pcmk_action_t *pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task,
bool optional, bool runnable);
void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone,
bool any_promoting, bool any_demoting);
bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node);
char *native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
const char *name, pcmk_scheduler_t *scheduler);
pcmk_node_t *native_location(const pcmk_resource_t *rsc, GList **list,
uint32_t target);
void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_scheduler_t *scheduler, gboolean failed);
gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
pcmk_resource_t *native_find_rsc(pcmk_resource_t *rsc, const char *id,
const pcmk_node_t *node, int flags);
gboolean native_active(pcmk_resource_t *rsc, gboolean all);
gboolean group_active(pcmk_resource_t *rsc, gboolean all);
gboolean clone_active(pcmk_resource_t *rsc, gboolean all);
gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all);
gchar *pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name,
...) G_GNUC_NULL_TERMINATED;
char *pe__node_display_name(pcmk_node_t *node, bool print_detail);
// Clone notifications (pe_notif.c)
void pe__order_notifs_after_fencing(const pcmk_action_t *action,
pcmk_resource_t *rsc,
pcmk_action_t *stonith_op);
// Resource output methods
int pe__clone_xml(pcmk__output_t *out, va_list args);
int pe__clone_default(pcmk__output_t *out, va_list args);
int pe__group_xml(pcmk__output_t *out, va_list args);
int pe__group_default(pcmk__output_t *out, va_list args);
int pe__bundle_xml(pcmk__output_t *out, va_list args);
int pe__bundle_html(pcmk__output_t *out, va_list args);
int pe__bundle_text(pcmk__output_t *out, va_list args);
int pe__node_html(pcmk__output_t *out, va_list args);
int pe__node_text(pcmk__output_t *out, va_list args);
int pe__node_xml(pcmk__output_t *out, va_list args);
int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
void native_free(pcmk_resource_t *rsc);
void group_free(pcmk_resource_t *rsc);
void clone_free(pcmk_resource_t *rsc);
void pe__free_bundle(pcmk_resource_t *rsc);
enum rsc_role_e native_resource_state(const pcmk_resource_t *rsc,
gboolean current);
enum rsc_role_e group_resource_state(const pcmk_resource_t *rsc,
gboolean current);
enum rsc_role_e clone_resource_state(const pcmk_resource_t *rsc,
gboolean current);
enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc,
gboolean current);
void pe__count_common(pcmk_resource_t *rsc);
void pe__count_bundle(pcmk_resource_t *rsc);
void common_free(pcmk_resource_t *rsc);
pcmk_node_t *pe__copy_node(const pcmk_node_t *this_node);
/* Failure handling utilities (from failcounts.c) */
int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc,
time_t *last_failure, uint32_t flags,
const xmlNode *xml_op);
pcmk_action_t *pe__clear_failcount(pcmk_resource_t *rsc,
const pcmk_node_t *node, const char *reason,
pcmk_scheduler_t *scheduler);
/* Functions for finding/counting a resource's active nodes */
bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
pcmk_node_t **active, unsigned int *count_all,
unsigned int *count_clean);
pcmk_node_t *pe__find_active_requires(const pcmk_resource_t *rsc,
unsigned int *count);
/* Binary-like operators for lists of nodes */
GHashTable *pe__node_list2table(const GList *list);
pcmk_action_t *get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler);
gboolean order_actions(pcmk_action_t *first, pcmk_action_t *then,
uint32_t flags);
void pe__show_node_scores_as(const char *file, const char *function,
int line, bool to_log, const pcmk_resource_t *rsc,
const char *comment, GHashTable *nodes,
pcmk_scheduler_t *scheduler);
#define pe__show_node_scores(level, rsc, text, nodes, scheduler) \
pe__show_node_scores_as(__FILE__, __func__, __LINE__, \
(level), (rsc), (text), (nodes), (scheduler))
GHashTable *pcmk__unpack_action_meta(pcmk_resource_t *rsc,
const pcmk_node_t *node,
const char *action_name, guint interval_ms,
const xmlNode *action_config);
GHashTable *pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
GHashTable *node_attrs,
pcmk_scheduler_t *data_set);
xmlNode *pcmk__find_action_config(const pcmk_resource_t *rsc,
const char *action_name, guint interval_ms,
bool include_disabled);
enum pcmk__requires pcmk__action_requires(const pcmk_resource_t *rsc,
const char *action_name);
enum pcmk__on_fail pcmk__parse_on_fail(const pcmk_resource_t *rsc,
const char *action_name,
guint interval_ms, const char *value);
enum rsc_role_e pcmk__role_after_failure(const pcmk_resource_t *rsc,
const char *action_name,
enum pcmk__on_fail on_fail,
GHashTable *meta);
pcmk_action_t *custom_action(pcmk_resource_t *rsc, char *key, const char *task,
const pcmk_node_t *on_node, gboolean optional,
pcmk_scheduler_t *scheduler);
#define delete_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_DELETE, 0)
#define stop_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_STOP, 0)
#define reload_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_RELOAD_AGENT, 0)
#define start_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_START, 0)
#define promote_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_PROMOTE, 0)
#define demote_key(rsc) pcmk__op_key((rsc)->id, PCMK_ACTION_DEMOTE, 0)
#define delete_action(rsc, node, optional) \
custom_action((rsc), delete_key(rsc), PCMK_ACTION_DELETE, \
(node), (optional), (rsc)->priv->scheduler)
#define stop_action(rsc, node, optional) \
custom_action((rsc), stop_key(rsc), PCMK_ACTION_STOP, \
(node), (optional), (rsc)->priv->scheduler)
#define start_action(rsc, node, optional) \
custom_action((rsc), start_key(rsc), PCMK_ACTION_START, \
(node), (optional), (rsc)->priv->scheduler)
#define promote_action(rsc, node, optional) \
custom_action((rsc), promote_key(rsc), PCMK_ACTION_PROMOTE, \
(node), (optional), (rsc)->priv->scheduler)
#define demote_action(rsc, node, optional) \
custom_action((rsc), demote_key(rsc), PCMK_ACTION_DEMOTE, \
(node), (optional), (rsc)->priv->scheduler)
pcmk_action_t *find_first_action(const GList *input, const char *uuid,
const char *task, const pcmk_node_t *on_node);
enum pcmk__action_type get_complex_task(const pcmk_resource_t *rsc,
const char *name);
GList *find_actions(GList *input, const char *key, const pcmk_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
const pcmk_node_t *on_node);
GList *pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node);
extern void pe_free_action(pcmk_action_t *action);
void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
const char *tag, pcmk_scheduler_t *scheduler);
int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role);
void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role,
const char *why);
extern void destroy_ticket(gpointer data);
pcmk__ticket_t *ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler);
// Functions for manipulating resource names
const char *pe_base_name_end(const char *id);
char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
pe_base_name_eq(const pcmk_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
}
return false;
}
int pe__target_rc_from_xml(const xmlNode *xml_op);
gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any);
pcmk__op_digest_t *pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
guint *interval_ms,
const pcmk_node_t *node,
const xmlNode *xml_op,
GHashTable *overrides,
bool calc_secure,
pcmk_scheduler_t *scheduler);
void pe__free_digests(gpointer ptr);
pcmk__op_digest_t *rsc_action_digest_cmp(pcmk_resource_t *rsc,
const xmlNode *xml_op,
pcmk_node_t *node,
pcmk_scheduler_t *scheduler);
pcmk_action_t *pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay,
pcmk_scheduler_t *scheduler);
void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node,
const char *reason, pcmk_action_t *dependency,
pcmk_scheduler_t *scheduler);
char *pe__action2reason(const pcmk_action_t *action,
enum pcmk__action_flags flag);
void pe_action_set_reason(pcmk_action_t *action, const char *reason,
bool overwrite);
void pe__add_action_expected_result(pcmk_action_t *action, int expected_result);
void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler,
uint64_t flag);
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay);
pcmk_node_t *pe_create_node(const char *id, const char *uname, const char *type,
int score, pcmk_scheduler_t *scheduler);
int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
unsigned int options);
int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
const char *name, const pcmk_node_t *node,
unsigned int options);
GList *pe__bundle_containers(const pcmk_resource_t *bundle);
int pe__bundle_max(const pcmk_resource_t *rsc);
bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
const pcmk_node_t *node);
pcmk_resource_t *pe__bundled_resource(const pcmk_resource_t *rsc);
const pcmk_resource_t *pe__get_rsc_in_container(const pcmk_resource_t *instance);
pcmk_resource_t *pe__first_container(const pcmk_resource_t *bundle);
void pe__foreach_bundle_replica(pcmk_resource_t *bundle,
bool (*fn)(pcmk__bundle_replica_t *, void *),
void *user_data);
void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
bool (*fn)(const pcmk__bundle_replica_t *,
void *),
void *user_data);
pcmk_resource_t *pe__find_bundle_replica(const pcmk_resource_t *bundle,
const pcmk_node_t *node);
bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc);
const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml,
const char *field);
bool pe__is_universal_clone(const pcmk_resource_t *rsc,
const pcmk_scheduler_t *scheduler);
-void pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
- pcmk_node_t *node, enum pcmk__check_parameters,
- pcmk_scheduler_t *scheduler);
-void pe__foreach_param_check(pcmk_scheduler_t *scheduler,
- void (*cb)(pcmk_resource_t*, pcmk_node_t*,
- const xmlNode*,
- enum pcmk__check_parameters));
-void pe__free_param_checks(pcmk_scheduler_t *scheduler);
bool pe__shutdown_requested(const pcmk_node_t *node);
void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
const char *reason);
/*!
* \internal
* \brief Register XML formatting message functions
*
* \param[in,out] out Output object to register messages with
*/
void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pcmk_rule_input_t *rule_input,
GHashTable *hash, const char *always_first,
pcmk_scheduler_t *scheduler);
bool pe__resource_is_disabled(const pcmk_resource_t *rsc);
void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node);
GList *pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
GList *pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
bool pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc,
const char *tag);
bool pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node,
const char *tag);
bool pe__rsc_running_on_only(const pcmk_resource_t *rsc,
const pcmk_node_t *node);
bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
GList * pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s);
GList * pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s);
bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node);
gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
xmlNode *pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name);
const char *pe__clone_child_id(const pcmk_resource_t *rsc);
int pe__sum_node_health_scores(const pcmk_node_t *node, int base_health);
int pe__node_health(pcmk_node_t *node);
static inline enum pcmk__health_strategy
pe__health_strategy(pcmk_scheduler_t *scheduler)
{
const char *strategy = pcmk__cluster_option(scheduler->priv->options,
PCMK_OPT_NODE_HEALTH_STRATEGY);
return pcmk__parse_health_strategy(strategy);
}
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_PENGINE_INTERNAL__H
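
A brief usage sketch for the clone-name helpers declared above; the resource name and return values are illustrative:

// clone_strip() removes any clone instance suffix, and clone_zero() maps
// any instance to instance 0; both return newly allocated strings
char *base = clone_strip("myclone:2"); // "myclone"
char *zero = clone_zero("myclone:2"); // "myclone:0"
// pe_base_name_eq(rsc, "myclone") is then true for any instance of myclone
free(base);
free(zero);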
diff --git a/lib/common/scheduler.c b/lib/common/scheduler.c
index 020de6283d..da0219973e 100644
--- a/lib/common/scheduler.c
+++ b/lib/common/scheduler.c
@@ -1,158 +1,242 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdint.h> // uint32_t
#include <errno.h> // EINVAL
-#include <glib.h> // gboolean, FALSE
+#include <glib.h> // gboolean, FALSE, etc.
#include <libxml/tree.h> // xmlNode
#include <crm/common/scheduler.h>
uint32_t pcmk__warnings = 0;
/*!
* \internal
* \brief Set non-zero default values in scheduler data
*
* \param[in,out] scheduler Scheduler data to modify
*
* \note Values that default to NULL or 0 will remain unchanged
*/
void
pcmk__set_scheduler_defaults(pcmk_scheduler_t *scheduler)
{
pcmk__assert(scheduler != NULL);
scheduler->flags = 0U;
#if PCMK__CONCURRENT_FENCING_DEFAULT_TRUE
pcmk__set_scheduler_flags(scheduler,
pcmk__sched_symmetric_cluster
|pcmk__sched_concurrent_fencing
|pcmk__sched_stop_removed_resources
|pcmk__sched_cancel_removed_actions);
#else
pcmk__set_scheduler_flags(scheduler,
pcmk__sched_symmetric_cluster
|pcmk__sched_stop_removed_resources
|pcmk__sched_cancel_removed_actions);
#endif
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
scheduler->priv->next_action_id = 1;
scheduler->priv->next_ordering_id = 1;
}
/*!
* \internal
* \brief Get the Designated Controller node from scheduler data
*
* \param[in] scheduler Scheduler data
*
* \return Designated Controller node from scheduler data, or NULL if none
*/
pcmk_node_t *
pcmk_get_dc(const pcmk_scheduler_t *scheduler)
{
return (scheduler == NULL)? NULL : scheduler->dc_node;
}
/*!
* \internal
* \brief Get the no quorum policy from scheduler data
*
* \param[in] scheduler Scheduler data
*
* \return No quorum policy from scheduler data
*/
enum pe_quorum_policy
pcmk_get_no_quorum_policy(const pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return pcmk_no_quorum_stop; // The default
}
return scheduler->no_quorum_policy;
}
/*!
* \internal
* \brief Set CIB XML as scheduler input in scheduler data
*
* \param[out] scheduler Scheduler data
* \param[in] cib CIB XML to set as scheduler input
*
* \return Standard Pacemaker return code (EINVAL if \p scheduler is NULL,
* otherwise pcmk_rc_ok)
* \note This will not free any previously set scheduler CIB.
*/
int
pcmk_set_scheduler_cib(pcmk_scheduler_t *scheduler, xmlNode *cib)
{
if (scheduler == NULL) {
return EINVAL;
}
scheduler->input = cib;
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Check whether cluster has quorum
*
* \param[in] scheduler Scheduler data
*
* \return true if cluster has quorum, otherwise false
*/
bool
pcmk_has_quorum(const pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return false;
}
return pcmk_is_set(scheduler->flags, pcmk__sched_quorate);
}
/*!
* \brief Find a node by name in scheduler data
*
* \param[in] scheduler Scheduler data
* \param[in] node_name Name of node to find
*
* \return Node from scheduler data that matches \p node_name if any,
* otherwise NULL
*/
pcmk_node_t *
pcmk_find_node(const pcmk_scheduler_t *scheduler, const char *node_name)
{
if ((scheduler == NULL) || (node_name == NULL)) {
return NULL;
}
return pcmk__find_node_in_list(scheduler->nodes, node_name);
}
/*!
* \internal
* \brief Get scheduler data's "now" in epoch time
*
* \param[in,out] scheduler Scheduler data
*
* \return Scheduler data's "now" as seconds since epoch (defaulting to current
* time)
*/
time_t
pcmk__scheduler_epoch_time(pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return time(NULL);
}
if (scheduler->priv->now == NULL) {
crm_trace("Scheduler 'now' set to current time");
scheduler->priv->now = crm_time_new(NULL);
}
return crm_time_get_seconds_since_epoch(scheduler->priv->now);
}
+
+/* Fail count clearing for parameter changes normally happens when unpacking
+ * history, before resources are unpacked. However, for bundles using the
+ * REMOTE_CONTAINER_HACK, we can't check the conditions until after unpacking
+ * the bundle, so those parameter checks are deferred using the APIs below.
+ */
+
+// History entry to be checked later for fail count clearing
+struct param_check {
+ const xmlNode *rsc_history; // History entry XML
+ pcmk_resource_t *rsc; // Resource corresponding to history entry
+ pcmk_node_t *node; // Node corresponding to history entry
+ enum pcmk__check_parameters check_type; // What needs checking
+};
+
+/*!
+ * \internal
+ * \brief Add a deferred parameter check
+ *
+ * \param[in] rsc_history Resource history XML to check later
+ * \param[in] rsc Resource that history is for
+ * \param[in] node Node that history is for
+ * \param[in] flag What needs to be checked later
+ * \param[in,out] scheduler Scheduler data
+ */
+void
+pcmk__add_param_check(const xmlNode *rsc_history, pcmk_resource_t *rsc,
+ pcmk_node_t *node, enum pcmk__check_parameters flag,
+ pcmk_scheduler_t *scheduler)
+{
+ struct param_check *param_check = NULL;
+
+ CRM_CHECK((rsc_history != NULL) && (rsc != NULL) && (node != NULL)
+ && (scheduler != NULL), return);
+
+ crm_trace("Deferring checks of %s until after assignment",
+ pcmk__xe_id(rsc_history));
+ param_check = pcmk__assert_alloc(1, sizeof(struct param_check));
+ param_check->rsc_history = rsc_history;
+ param_check->rsc = rsc;
+ param_check->node = node;
+ param_check->check_type = flag;
+ scheduler->priv->param_check = g_list_prepend(scheduler->priv->param_check,
+ param_check);
+}
+
+/*!
+ * \internal
+ * \brief Call a function for each deferred parameter check
+ *
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] cb Function to be called
+ */
+void
+pcmk__foreach_param_check(pcmk_scheduler_t *scheduler,
+ void (*cb)(pcmk_resource_t*, pcmk_node_t*,
+ const xmlNode*,
+ enum pcmk__check_parameters))
+{
+ CRM_CHECK((scheduler != NULL) && (cb != NULL), return);
+
+ for (GList *item = scheduler->priv->param_check;
+ item != NULL; item = item->next) {
+ struct param_check *param_check = item->data;
+
+ cb(param_check->rsc, param_check->node, param_check->rsc_history,
+ param_check->check_type);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Free all deferred parameter checks
+ *
+ * \param[in,out] scheduler Scheduler data
+ */
+void
+pcmk__free_param_checks(pcmk_scheduler_t *scheduler)
+{
+ if ((scheduler != NULL) && (scheduler->priv->param_check != NULL)) {
+ g_list_free_full(scheduler->priv->param_check, free);
+ scheduler->priv->param_check = NULL;
+ }
+}
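
A sketch of the intended call sequence for the three functions added above; the call sites are illustrative, and check_params_cb is the hypothetical callback sketched earlier:

// While unpacking resource history, defer a check that can't be done yet
// (e.g., for bundles using the REMOTE_CONTAINER_HACK):
pcmk__add_param_check(rsc_history, rsc, node, pcmk__check_active, scheduler);

// After resources have been unpacked and assigned, run each deferred check:
pcmk__foreach_param_check(scheduler, check_params_cb);

// Then release the list; g_list_free_full() with free() releases only the
// list and its param_check structs, not the referenced XML, resource, or node:
pcmk__free_param_checks(scheduler);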
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index e6ed67c0d9..70e5f9083b 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -1,1942 +1,1942 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <sys/param.h>
#include <glib.h>
#include <crm/lrmd_internal.h>
#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Get the action flags relevant to ordering constraints
*
* \param[in,out] action Action to check
* \param[in] node Node that *other* action in the ordering is on
* (used only for clone resource actions)
*
* \return Action flags that should be used for orderings
*/
static uint32_t
action_flags_for_ordering(pcmk_action_t *action, const pcmk_node_t *node)
{
bool runnable = false;
uint32_t flags;
// For non-resource actions, return the action flags
if (action->rsc == NULL) {
return action->flags;
}
/* For non-clone resources, or a clone action not assigned to a node,
* return the flags as determined by the resource method without a node
* specified.
*/
flags = action->rsc->priv->cmds->action_flags(action, NULL);
if ((node == NULL) || !pcmk__is_clone(action->rsc)) {
return flags;
}
/* Otherwise (i.e., for clone resource actions on a specific node), first
* remember whether the non-node-specific action is runnable.
*/
runnable = pcmk_is_set(flags, pcmk__action_runnable);
// Then recheck the resource method with the node
flags = action->rsc->priv->cmds->action_flags(action, node);
/* For clones in ordering constraints, the node-specific "runnable" doesn't
* matter, just the non-node-specific setting (i.e., is the action runnable
* anywhere).
*
* This applies only to runnable, and only for ordering constraints. This
* function shouldn't be used for other types of constraints without
* changes. Not very satisfying, but it's logical and appears to work well.
*/
if (runnable && !pcmk_is_set(flags, pcmk__action_runnable)) {
pcmk__set_raw_action_flags(flags, action->rsc->id,
pcmk__action_runnable);
}
return flags;
}
/*!
* \internal
* \brief Get action UUID that should be used with a resource ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the UUID and resource of the first action in an
* ordering, this returns the UUID of the action that should actually be used
* for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0").
*
* \param[in] first_uuid UUID of first action in ordering
* \param[in] first_rsc Resource of first action in ordering
*
* \return Newly allocated copy of UUID to use with ordering
* \note It is the caller's responsibility to free the return value.
*/
static char *
action_uuid_for_ordering(const char *first_uuid,
const pcmk_resource_t *first_rsc)
{
guint interval_ms = 0;
char *uuid = NULL;
char *rid = NULL;
char *first_task_str = NULL;
enum pcmk__action_type first_task = pcmk__action_unspecified;
enum pcmk__action_type remapped_task = pcmk__action_unspecified;
// Only non-notify actions for collective resources need remapping
if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
|| (first_rsc->priv->variant < pcmk__rsc_variant_group)) {
goto done;
}
// Only non-recurring actions need remapping
pcmk__assert(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms));
if (interval_ms > 0) {
goto done;
}
first_task = pcmk__parse_action(first_task_str);
switch (first_task) {
case pcmk__action_stop:
case pcmk__action_start:
case pcmk__action_notify:
case pcmk__action_promote:
case pcmk__action_demote:
remapped_task = first_task + 1;
break;
case pcmk__action_stopped:
case pcmk__action_started:
case pcmk__action_notified:
case pcmk__action_promoted:
case pcmk__action_demoted:
remapped_task = first_task;
break;
case pcmk__action_monitor:
case pcmk__action_shutdown:
case pcmk__action_fence:
break;
default:
crm_err("Unknown action '%s' in ordering", first_task_str);
break;
}
if (remapped_task != pcmk__action_unspecified) {
/* If a clone or bundle has notifications enabled, the ordering will be
* relative to when notifications have been sent for the remapped task.
*/
if (pcmk_is_set(first_rsc->flags, pcmk__rsc_notify)
&& (pcmk__is_clone(first_rsc) || pcmk__is_bundled(first_rsc))) {
uuid = pcmk__notify_key(rid, "confirmed-post",
pcmk__action_text(remapped_task));
} else {
uuid = pcmk__op_key(rid, pcmk__action_text(remapped_task), 0);
}
pcmk__rsc_trace(first_rsc,
"Remapped action UUID %s to %s for ordering purposes",
first_uuid, uuid);
}
done:
free(first_task_str);
free(rid);
return (uuid != NULL)? uuid : pcmk__str_copy(first_uuid);
}
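/* Illustrative example (an assumption from the key helpers above, not part
 * of this patch): for first_uuid "CLONE_start_0" on a clone, the remapped
 * task is "started", so this would return "CLONE_started_0", or a
 * "confirmed-post" notification key for "started" when notifications are
 * enabled.
 */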
/*!
* \internal
* \brief Get actual action that should be used with an ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the first action in an ordering, this returns
* the action that should actually be used for ordering (for example, the
* started action instead of the start action).
*
* \param[in] action First action in an ordering
*
* \return Actual action that should be used for the ordering
*/
static pcmk_action_t *
action_for_ordering(pcmk_action_t *action)
{
pcmk_action_t *result = action;
pcmk_resource_t *rsc = action->rsc;
if (rsc == NULL) {
return result;
}
if ((rsc->priv->variant >= pcmk__rsc_variant_group)
&& (action->uuid != NULL)) {
char *uuid = action_uuid_for_ordering(action->uuid, rsc);
result = find_first_action(rsc->priv->actions, uuid, NULL, NULL);
if (result == NULL) {
crm_warn("Not remapping %s to %s because %s does not have "
"remapped action", action->uuid, uuid, rsc->id);
result = action;
}
free(uuid);
}
return result;
}
/*!
* \internal
* \brief Wrapper for update_ordered_actions() method for readability
*
* \param[in,out] rsc Resource to call method for
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this
* node (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates
* (may include pcmk__action_optional to affect only
* mandatory actions, and pcmk__action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static inline uint32_t
update(pcmk_resource_t *rsc, pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
return rsc->priv->cmds->update_ordered_actions(first, then, node, flags,
filter, type, scheduler);
}
/*!
* \internal
* \brief Update flags for ordering's actions appropriately for ordering's flags
*
* \param[in,out] first First action in an ordering
* \param[in,out] then Then action in an ordering
* \param[in] first_flags Action flags for \p first for ordering purposes
* \param[in] then_flags Action flags for \p then for ordering purposes
* \param[in,out] order Action wrapper for \p first in ordering
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags
*/
static uint32_t
update_action_for_ordering_flags(pcmk_action_t *first, pcmk_action_t *then,
uint32_t first_flags, uint32_t then_flags,
pcmk__related_action_t *order,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
/* The node will only be used for clones. If interleaved, node will be NULL,
* otherwise the ordering scope will be limited to the node. Normally, the
* whole 'then' clone should restart if 'first' is restarted, so then->node
* is needed.
*/
pcmk_node_t *node = then->node;
if (pcmk_is_set(order->flags, pcmk__ar_first_implies_same_node_then)) {
/* For unfencing, only instances of 'then' on the same node as 'first'
* (the unfencing operation) should restart, so reset node to
* first->node, at which point this case is handled like a normal
* pcmk__ar_first_implies_then.
*/
pcmk__clear_relation_flags(order->flags,
pcmk__ar_first_implies_same_node_then);
pcmk__set_relation_flags(order->flags, pcmk__ar_first_implies_then);
node = first->node;
pcmk__rsc_trace(then->rsc,
"%s then %s: mapped "
"pcmk__ar_first_implies_same_node_then to "
"pcmk__ar_first_implies_then on %s",
first->uuid, then->uuid, pcmk__node_name(node));
}
if (pcmk_is_set(order->flags, pcmk__ar_first_implies_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk__action_optional,
pcmk__action_optional,
pcmk__ar_first_implies_then, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk__action_optional)
&& pcmk_is_set(then->flags, pcmk__action_optional)) {
pcmk__clear_action_flags(then, pcmk__action_optional);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_first_implies_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_intermediate_stop)
&& (then->rsc != NULL)) {
enum pcmk__action_flags restart = pcmk__action_optional
|pcmk__action_runnable;
changed |= update(then->rsc, first, then, node, first_flags, restart,
pcmk__ar_intermediate_stop, scheduler);
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_intermediate_stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_then_implies_first)) {
if (first->rsc != NULL) {
changed |= update(first->rsc, first, then, node, first_flags,
pcmk__action_optional,
pcmk__ar_then_implies_first, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk__action_optional)
&& pcmk_is_set(first->flags, pcmk__action_runnable)) {
pcmk__clear_action_flags(first, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_promoted_then_implies_first)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk__action_optional,
pcmk__action_optional,
pcmk__ar_promoted_then_implies_first, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after "
"pcmk__ar_promoted_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_min_runnable)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable, pcmk__ar_min_runnable,
scheduler);
} else if (pcmk_is_set(first_flags, pcmk__action_runnable)) {
// We have another runnable instance of "first"
then->runnable_before++;
/* Mark "then" as runnable if it requires a certain number of
* "before" instances to be runnable, and they now are.
*/
if ((then->runnable_before >= then->required_runnable_before)
&& !pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__set_action_flags(then, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_min_runnable",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_nested_remote_probe)
&& (then->rsc != NULL)) {
if (!pcmk_is_set(first_flags, pcmk__action_runnable)
&& (first->rsc != NULL)
&& (first->rsc->priv->active_nodes != NULL)) {
pcmk__rsc_trace(then->rsc,
"%s then %s: ignoring because first is stopping",
first->uuid, then->uuid);
order->flags = pcmk__ar_none;
} else {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_nested_remote_probe",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_unrunnable_first_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk__action_runnable)
&& pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__clear_action_flags(then, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_unrunnable_first_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_unmigratable_then_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_optional,
pcmk__ar_unmigratable_then_blocks, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after "
"pcmk__ar_unmigratable_then_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_first_else_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_optional, pcmk__ar_first_else_then,
scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_first_else_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_ordered)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable, pcmk__ar_ordered,
scheduler);
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_ordered",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_asymmetric)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable, pcmk__ar_asymmetric,
scheduler);
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_asymmetric",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(first->flags, pcmk__action_runnable)
&& pcmk_is_set(order->flags, pcmk__ar_first_implies_then_graphed)
&& !pcmk_is_set(first_flags, pcmk__action_optional)) {
pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
then->uuid, first->uuid);
pcmk__set_action_flags(then, pcmk__action_always_in_graph);
// Don't bother marking 'then' as changed just for this
}
if (pcmk_is_set(order->flags, pcmk__ar_then_implies_first_graphed)
&& !pcmk_is_set(then_flags, pcmk__action_optional)) {
pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
first->uuid, then->uuid);
pcmk__set_action_flags(first, pcmk__action_always_in_graph);
// Don't bother marking 'first' as changed just for this
}
if (pcmk_any_flags_set(order->flags, pcmk__ar_first_implies_then
|pcmk__ar_then_implies_first
|pcmk__ar_intermediate_stop)
&& (first->rsc != NULL)
&& !pcmk_is_set(first->rsc->flags, pcmk__rsc_managed)
&& pcmk_is_set(first->rsc->flags, pcmk__rsc_blocked)
&& !pcmk_is_set(first->flags, pcmk__action_runnable)
&& pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
if (pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__clear_action_flags(then, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after checking whether first "
"is blocked, unmanaged, unrunnable stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
return changed;
}
// Convenience macros for logging action properties
#define action_type_str(flags) \
(pcmk_is_set((flags), pcmk__action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
(pcmk_is_set((flags), pcmk__action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
(pcmk_is_set((flags), pcmk__action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->priv->name)
/*!
* \internal
* \brief Update an action's flags for all orderings where it is "then"
*
* \param[in,out] then Action to update
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__update_action_for_orderings(pcmk_action_t *then,
pcmk_scheduler_t *scheduler)
{
GList *lpc = NULL;
uint32_t changed = pcmk__updated_none;
int last_flags = then->flags;
pcmk__rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s",
action_type_str(then->flags), then->uuid,
action_optional_str(then->flags),
action_runnable_str(then->flags), action_node_str(then));
if (then->required_runnable_before > 0) {
/* Initialize current known "runnable before" actions. As
* update_action_for_ordering_flags() is called for each of then's
* before actions, this number will increment as runnable 'first'
* actions are encountered.
*/
then->runnable_before = 0;
/* The pcmk__ar_min_runnable clause of
* update_action_for_ordering_flags() (called below)
* will reset runnable if appropriate.
*/
pcmk__clear_action_flags(then, pcmk__action_runnable);
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *other = lpc->data;
pcmk_action_t *first = other->action;
pcmk_node_t *then_node = then->node;
pcmk_node_t *first_node = first->node;
const uint32_t target = pcmk__rsc_node_assigned;
if ((first->rsc != NULL)
&& pcmk__is_group(first->rsc)
&& pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
first_node = first->rsc->priv->fns->location(first->rsc, NULL,
target);
if (first_node != NULL) {
pcmk__rsc_trace(first->rsc, "Found %s for 'first' %s",
pcmk__node_name(first_node), first->uuid);
}
}
if (pcmk__is_group(then->rsc)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
then_node = then->rsc->priv->fns->location(then->rsc, NULL, target);
if (then_node != NULL) {
pcmk__rsc_trace(then->rsc, "Found %s for 'then' %s",
pcmk__node_name(then_node), then->uuid);
}
}
// Disable constraint if it applies only when on the same node, but the actions aren't
if (pcmk_is_set(other->flags, pcmk__ar_if_on_same_node)
&& (first_node != NULL) && (then_node != NULL)
&& !pcmk__same_node(first_node, then_node)) {
pcmk__rsc_trace(then->rsc,
"Disabled ordering %s on %s then %s on %s: "
"not same node",
other->action->uuid, pcmk__node_name(first_node),
then->uuid, pcmk__node_name(then_node));
other->flags = pcmk__ar_none;
continue;
}
pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
if ((first->rsc != NULL)
&& pcmk_is_set(other->flags, pcmk__ar_then_cancels_first)
&& !pcmk_is_set(then->flags, pcmk__action_optional)) {
/* 'then' is required, so we must abandon 'first'
* (e.g. a required stop cancels any agent reload).
*/
pcmk__set_action_flags(other->action, pcmk__action_optional);
if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
pcmk__clear_rsc_flags(first->rsc, pcmk__rsc_reload);
}
}
if ((first->rsc != NULL) && (then->rsc != NULL)
&& (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) {
first = action_for_ordering(first);
}
if (first != other->action) {
pcmk__rsc_trace(then->rsc, "Ordering %s after %s instead of %s",
then->uuid, first->uuid, other->action->uuid);
}
pcmk__rsc_trace(then->rsc,
"%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s",
first->uuid, first->flags, then->uuid, then->flags,
other->flags, action_node_str(first));
if (first == other->action) {
/* 'first' was not remapped (e.g. from 'start' to 'running'), which
* could mean it is a non-resource action, a primitive resource
* action, or already expanded.
*/
uint32_t first_flags, then_flags;
first_flags = action_flags_for_ordering(first, then_node);
then_flags = action_flags_for_ordering(then, first_node);
changed |= update_action_for_ordering_flags(first, then,
first_flags, then_flags,
other, scheduler);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
*/
} else if (order_actions(first, then, other->flags)) {
/* This was the first time 'first' and 'then' were associated,
* start again to get the new actions_before list
*/
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
pcmk__rsc_trace(then->rsc,
"Disabled ordering %s then %s in favor of %s "
"then %s",
other->action->uuid, then->uuid, first->uuid,
then->uuid);
other->flags = pcmk__ar_none;
}
if (pcmk_is_set(changed, pcmk__updated_first)) {
crm_trace("Re-processing %s and its 'after' actions "
"because it changed", first->uuid);
for (GList *lpc2 = first->actions_after; lpc2 != NULL;
lpc2 = lpc2->next) {
pcmk__related_action_t *other = lpc2->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
pcmk__update_action_for_orderings(first, scheduler);
}
}
if (then->required_runnable_before > 0) {
if (last_flags == then->flags) {
pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
} else {
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
}
}
if (pcmk_is_set(changed, pcmk__updated_then)) {
crm_trace("Re-processing %s and its 'after' actions because it changed",
then->uuid);
if (pcmk_is_set(last_flags, pcmk__action_runnable)
&& !pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__block_colocation_dependents(then);
}
pcmk__update_action_for_orderings(then, scheduler);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *other = lpc->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
}
}
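/* Illustrative sketch (not part of this patch) of how one relation flag
 * cascades here: with pcmk__ar_first_implies_then set and a non-resource
 * 'then', a required 'first' makes 'then' required, and marking
 * pcmk__updated_then re-processes everything ordered after 'then':
 *
 *   if (!pcmk_is_set(first_flags, pcmk__action_optional)
 *       && pcmk_is_set(then->flags, pcmk__action_optional)) {
 *       pcmk__clear_action_flags(then, pcmk__action_optional);
 *       pcmk__set_updated_flags(changed, first, pcmk__updated_then);
 *   }
 */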
static inline bool
is_primitive_action(const pcmk_action_t *action)
{
return (action != NULL) && pcmk__is_primitive(action->rsc);
}
/*!
* \internal
* \brief Clear a single action flag and set reason text
*
* \param[in,out] action Action whose flag should be cleared
* \param[in] flag Action flag that should be cleared
* \param[in] reason Action that is the reason why flag is being cleared
*/
#define clear_action_flag_because(action, flag, reason) do { \
if (pcmk_is_set((action)->flags, (flag))) { \
pcmk__clear_action_flags(action, flag); \
if ((action)->rsc != (reason)->rsc) { \
char *reason_text = pe__action2reason((reason), (flag)); \
pe_action_set_reason((action), reason_text, false); \
free(reason_text); \
} \
} \
} while (0)
/*!
* \internal
* \brief Update actions in an asymmetric ordering
*
* If the "first" action in an asymmetric ordering is unrunnable, make the
* "second" action unrunnable as well, if appropriate.
*
* \param[in] first 'First' action in an asymmetric ordering
* \param[in,out] then 'Then' action in an asymmetric ordering
*/
static void
handle_asymmetric_ordering(const pcmk_action_t *first, pcmk_action_t *then)
{
/* Only resource actions after an unrunnable 'first' action need updates for
* asymmetric ordering.
*/
if ((then->rsc == NULL)
|| pcmk_is_set(first->flags, pcmk__action_runnable)) {
return;
}
// Certain optional 'then' actions are unaffected by unrunnable 'first'
if (pcmk_is_set(then->flags, pcmk__action_optional)) {
enum rsc_role_e then_rsc_role;
then_rsc_role = then->rsc->priv->fns->state(then->rsc, TRUE);
if ((then_rsc_role == pcmk_role_stopped)
&& pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* If 'then' should stop after 'first' but is already stopped, the
* ordering is irrelevant.
*/
return;
} else if ((then_rsc_role >= pcmk_role_started)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
&& pe__rsc_running_on_only(then->rsc, then->node)) {
/* Similarly, if 'then' should start after 'first' but is already
* started on a single node, the ordering is irrelevant.
*/
return;
}
}
// 'First' can't run, so 'then' can't either
clear_action_flag_because(then, pcmk__action_optional, first);
clear_action_flag_because(then, pcmk__action_runnable, first);
}
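/* Illustration (hypothetical configuration, not taken from this diff): an
 * asymmetric ordering comes from a constraint such as
 *
 *   <rsc_order id="order-A-B" first="A" then="B" symmetrical="false"/>
 *
 * If A's start becomes unrunnable, the function above clears B's optional
 * and runnable flags so B will not be started, while the reverse (stop)
 * ordering is not implied.
 */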
/*!
* \internal
* \brief Set action bits appropriately when pcmk__ar_intermediate_stop is used
*
* \param[in,out] first 'First' action in ordering
* \param[in,out] then 'Then' action in ordering
* \param[in] filter What action flags to care about
*
* \note pcmk__ar_intermediate_stop is set for "stop resource before starting
* it" and "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pcmk_action_t *first, pcmk_action_t *then,
uint32_t filter)
{
const char *reason = NULL;
pcmk__assert(is_primitive_action(first) && is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(then->flags, pcmk__action_optional)) {
reason = "restart";
}
/* ... if 'then' is an unrunnable action on the same resource (if a
* resource should restart but can't start, we still want to stop)
*/
if (pcmk_is_set(filter, pcmk__action_runnable)
&& !pcmk_is_set(then->flags, pcmk__action_runnable)
&& pcmk_is_set(then->rsc->flags, pcmk__rsc_managed)
&& (first->rsc == then->rsc)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pcmk__rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (pcmk_is_set(first->flags, pcmk__action_runnable)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
// Make 'first' required if 'then' is required
if (!pcmk_is_set(then->flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (!pcmk_is_set(then->flags, pcmk__action_migratable)) {
clear_action_flag_because(first, pcmk__action_migratable, then);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (!pcmk_is_set(first->flags, pcmk__action_optional)
&& !pcmk_is_set(first->flags, pcmk__action_runnable)) {
clear_action_flag_because(then, pcmk__action_runnable, first);
}
}
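/* Sketch of the cases handled above (hypothetical resource "rsc1"): for the
 * ordering "rsc1_stop then rsc1_start" created with
 * pcmk__ar_intermediate_stop, a required start makes the stop required
 * ("restart"), and an unrunnable start on a managed resource still forces
 * the stop ("stop"), so a resource that must restart but cannot start is at
 * least stopped.
 */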
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions'
* flags (and runnable_before members where relevant) as appropriate for the
* ordering.
* Effects may cascade to other orderings involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (ignored)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk__action_optional to affect only
* mandatory actions, and pcmk__action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
uint32_t then_flags = 0U;
uint32_t first_flags = 0U;
pcmk__assert((first != NULL) && (then != NULL) && (scheduler != NULL));
then_flags = then->flags;
first_flags = first->flags;
if (pcmk_is_set(type, pcmk__ar_asymmetric)) {
handle_asymmetric_ordering(first, then);
}
if (pcmk_is_set(type, pcmk__ar_then_implies_first)
&& !pcmk_is_set(then_flags, pcmk__action_optional)) {
// Then is required, and implies first should be, too
if (pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(flags, pcmk__action_optional)
&& pcmk_is_set(first_flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
if (pcmk_is_set(flags, pcmk__action_migratable)
&& !pcmk_is_set(then->flags, pcmk__action_migratable)) {
clear_action_flag_because(first, pcmk__action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_promoted_then_implies_first)
&& (then->rsc != NULL)
&& (then->rsc->priv->orig_role == pcmk_role_promoted)
&& pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(then->flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
if (pcmk_is_set(first->flags, pcmk__action_migratable)
&& !pcmk_is_set(then->flags, pcmk__action_migratable)) {
clear_action_flag_because(first, pcmk__action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_unmigratable_then_blocks)
&& pcmk_is_set(filter, pcmk__action_optional)) {
if (!pcmk_all_flags_set(then->flags, pcmk__action_migratable
|pcmk__action_runnable)) {
clear_action_flag_because(first, pcmk__action_runnable, then);
}
if (!pcmk_is_set(then->flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
}
if (pcmk_is_set(type, pcmk__ar_first_else_then)
&& pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(first->flags, pcmk__action_runnable)) {
clear_action_flag_because(then, pcmk__action_migratable, first);
pcmk__clear_action_flags(then, pcmk__action_pseudo);
}
if (pcmk_is_set(type, pcmk__ar_unrunnable_first_blocks)
&& pcmk_is_set(filter, pcmk__action_runnable)
&& pcmk_is_set(then->flags, pcmk__action_runnable)
&& !pcmk_is_set(flags, pcmk__action_runnable)) {
clear_action_flag_because(then, pcmk__action_runnable, first);
clear_action_flag_because(then, pcmk__action_migratable, first);
}
if (pcmk_is_set(type, pcmk__ar_first_implies_then)
&& pcmk_is_set(filter, pcmk__action_optional)
&& pcmk_is_set(then->flags, pcmk__action_optional)
&& !pcmk_is_set(flags, pcmk__action_optional)
&& !pcmk_is_set(first->flags, pcmk__action_migratable)) {
clear_action_flag_because(then, pcmk__action_optional, first);
}
if (pcmk_is_set(type, pcmk__ar_intermediate_stop)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
pcmk__rsc_trace(then->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'first' %s (%#.6x)",
then->uuid, pcmk__node_name(then->node),
then->flags, then_flags, first->uuid, first->flags);
if ((then->rsc != NULL) && (then->rsc->priv->parent != NULL)) {
// Required to handle "X_stop then X_start" for cloned groups
pcmk__update_action_for_orderings(then, scheduler);
}
}
if (first_flags != first->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
pcmk__rsc_trace(first->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'then' %s (%#.6x)",
first->uuid, pcmk__node_name(first->node),
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
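/* Minimal caller sketch (hypothetical action pointers, not part of this
 * change): propagate a mandatory "unrunnable first blocks then" ordering,
 * limiting updates to runnability, then cascade if 'then' changed:
 *
 *   uint32_t changed = pcmk__update_ordered_actions(
 *       first, then, NULL, first->flags, pcmk__action_runnable,
 *       pcmk__ar_unrunnable_first_blocks, scheduler);
 *
 *   if (pcmk_is_set(changed, pcmk__updated_then)) {
 *       pcmk__update_action_for_orderings(then, scheduler);
 *   }
 */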
/*!
* \internal
* \brief Trace-log an action (optionally with its dependent actions)
*
* \param[in] pre_text If not NULL, prefix the log with this plus ": "
* \param[in] action Action to log
* \param[in] details If true, recursively log dependent actions
*/
void
pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
bool details)
{
const char *node_uname = NULL;
const char *node_uuid = NULL;
const char *desc = NULL;
CRM_CHECK(action != NULL, return);
if (!pcmk_is_set(action->flags, pcmk__action_pseudo)) {
if (action->node != NULL) {
node_uname = action->node->priv->name;
node_uuid = action->node->priv->id;
} else {
node_uname = "<none>";
}
}
switch (pcmk__parse_action(action->task)) {
case pcmk__action_fence:
case pcmk__action_shutdown:
if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
desc = "Pseudo ";
} else if (pcmk_is_set(action->flags, pcmk__action_optional)) {
desc = "Optional ";
} else if (!pcmk_is_set(action->flags, pcmk__action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
default:
if (pcmk_is_set(action->flags, pcmk__action_optional)) {
desc = "Optional ";
} else if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
desc = "Pseudo ";
} else if (!pcmk_is_set(action->flags, pcmk__action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(action->rsc? action->rsc->id : "<none>"),
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
}
if (details) {
const GList *iter = NULL;
const pcmk__related_action_t *other = NULL;
crm_trace("\t\t====== Preceding Actions");
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== Subsequent Actions");
for (iter = action->actions_after; iter != NULL; iter = iter->next) {
other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== End");
} else {
crm_trace("\t\t(before=%d, after=%d)",
g_list_length(action->actions_before),
g_list_length(action->actions_after));
}
}
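/* Usage sketch (hypothetical caller): log an action along with its ordering
 * neighbors while debugging the transition graph:
 *
 *   pcmk__log_action("apply_orderings", action, true);
 */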
/*!
* \internal
* \brief Create a new shutdown action for a node
*
* \param[in,out] node Node being shut down
*
* \return Newly created shutdown action for \p node
*/
pcmk_action_t *
pcmk__new_shutdown_action(pcmk_node_t *node)
{
char *shutdown_id = NULL;
pcmk_action_t *shutdown_op = NULL;
pcmk__assert(node != NULL);
shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
node->priv->name);
shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
node, FALSE, node->priv->scheduler);
pcmk__order_stops_before_shutdown(node, shutdown_op);
pcmk__insert_meta(shutdown_op, PCMK__META_OP_NO_WAIT, PCMK_VALUE_TRUE);
return shutdown_op;
}
/*!
* \internal
* \brief Calculate and add an operation digest to XML
*
* Calculate an operation digest, which enables us to determine later whether a
* restart is needed due to a change in the resource's parameters, and add it
* to the given XML.
*
* \param[in] op Operation result from executor
* \param[in,out] update XML to add digest to
*/
static void
add_op_digest_to_xml(const lrmd_event_data_t *op, xmlNode *update)
{
char *digest = NULL;
xmlNode *args_xml = NULL;
if (op->params == NULL) {
return;
}
args_xml = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
g_hash_table_foreach(op->params, hash2field, args_xml);
pcmk__filter_op_for_digest(args_xml);
digest = pcmk__digest_operation(args_xml);
crm_xml_add(update, PCMK__XA_OP_DIGEST, digest);
pcmk__xml_free(args_xml);
free(digest);
}
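/* For illustration (digest value hypothetical): after this runs, the history
 * entry carries an attribute like
 *
 *   op-digest="873ed4f07792aa8ff18f3254244675ea"
 *
 * which later scheduler runs compare against a digest recalculated from the
 * current configuration to detect parameter changes.
 */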
#define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
/*!
* \internal
* \brief Create XML for resource operation history update
*
* \param[in,out] parent Parent XML node to add to
* \param[in,out] op Operation event data
* \param[in] caller_version DC feature set
* \param[in] target_rc Expected result of operation
* \param[in] node Name of node on which operation was performed
* \param[in] origin Arbitrary description of update source
*
* \return Newly created XML node for history update
*/
xmlNode *
pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
const char *caller_version, int target_rc,
const char *node, const char *origin)
{
char *key = NULL;
char *magic = NULL;
char *op_id = NULL;
char *op_id_additional = NULL;
char *local_user_data = NULL;
const char *exit_reason = NULL;
xmlNode *xml_op = NULL;
const char *task = NULL;
CRM_CHECK(op != NULL, return NULL);
crm_trace("Creating history XML for %s-interval %s action for %s on %s "
"(DC version: %s, origin: %s)",
pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
((node == NULL)? "no node" : node), caller_version, origin);
task = op->op_type;
/* Record a successful agent reload as a start, and a failed one as a
* monitor, to make life easier for the scheduler when determining the
* current state.
*
* @COMPAT We should check "reload" here only if the operation was for a
* pre-OCF-1.1 resource agent, but we don't know that here, and we should
* only ever get results for actions scheduled by us, so we can reasonably
* assume any "reload" is actually a pre-1.1 agent reload.
*/
if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
NULL)) {
if (op->op_status == PCMK_EXEC_DONE) {
task = PCMK_ACTION_START;
} else {
task = PCMK_ACTION_MONITOR;
}
}
key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = crm_meta_value(op->params, "notify_type");
const char *n_task = crm_meta_value(op->params, "notify_operation");
CRM_LOG_ASSERT(n_type != NULL);
CRM_LOG_ASSERT(n_task != NULL);
op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
if (op->op_status != PCMK_EXEC_PENDING) {
/* Ignore notify errors.
*
* @TODO It might be better to keep the correct result here, and
* ignore it in process_graph_event().
*/
lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
}
/* Migration history is preserved separately, which usually matters for
* multiple nodes and is important for future cluster transitions.
*/
} else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
op_id = strdup(key);
} else if (did_rsc_op_fail(op, target_rc)) {
op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
if (op->interval_ms == 0) {
/* Ensure 'last' gets updated, in case PCMK_META_RECORD_PENDING is
* true
*/
op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
}
exit_reason = op->exit_reason;
} else if (op->interval_ms > 0) {
op_id = strdup(key);
} else {
op_id = pcmk__op_key(op->rsc_id, "last", 0);
}
again:
xml_op = pcmk__xe_first_child(parent, PCMK__XE_LRM_RSC_OP, PCMK_XA_ID,
op_id);
if (xml_op == NULL) {
xml_op = pcmk__xe_create(parent, PCMK__XE_LRM_RSC_OP);
}
if (op->user_data == NULL) {
crm_debug("Generating fake transition key for: " PCMK__OP_FMT
" %d from %s", op->rsc_id, op->op_type, op->interval_ms,
op->call_id, origin);
local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
FAKE_TE_ID);
op->user_data = local_user_data;
}
if (magic == NULL) {
magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc,
(const char *) op->user_data);
}
crm_xml_add(xml_op, PCMK_XA_ID, op_id);
crm_xml_add(xml_op, PCMK__XA_OPERATION_KEY, key);
crm_xml_add(xml_op, PCMK_XA_OPERATION, task);
crm_xml_add(xml_op, PCMK_XA_CRM_DEBUG_ORIGIN, origin);
crm_xml_add(xml_op, PCMK_XA_CRM_FEATURE_SET, caller_version);
crm_xml_add(xml_op, PCMK__XA_TRANSITION_KEY, op->user_data);
crm_xml_add(xml_op, PCMK__XA_TRANSITION_MAGIC, magic);
crm_xml_add(xml_op, PCMK_XA_EXIT_REASON, pcmk__s(exit_reason, ""));
crm_xml_add(xml_op, PCMK__META_ON_NODE, node); // For context during triage
crm_xml_add_int(xml_op, PCMK__XA_CALL_ID, op->call_id);
crm_xml_add_int(xml_op, PCMK__XA_RC_CODE, op->rc);
crm_xml_add_int(xml_op, PCMK__XA_OP_STATUS, op->op_status);
crm_xml_add_ms(xml_op, PCMK_META_INTERVAL, op->interval_ms);
if ((op->t_run > 0) || (op->t_rcchange > 0) || (op->exec_time > 0)
|| (op->queue_time > 0)) {
crm_trace("Timing data (" PCMK__OP_FMT "): "
"last=%lld change=%lld exec=%u queue=%u",
op->rsc_id, op->op_type, op->interval_ms,
(long long) op->t_run, (long long) op->t_rcchange,
op->exec_time, op->queue_time);
if ((op->interval_ms > 0) && (op->t_rcchange > 0)) {
// Recurring ops may have changed rc after initial run
crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
(long long) op->t_rcchange);
} else {
crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
(long long) op->t_run);
}
crm_xml_add_int(xml_op, PCMK_XA_EXEC_TIME, op->exec_time);
crm_xml_add_int(xml_op, PCMK_XA_QUEUE_TIME, op->queue_time);
}
if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* Record PCMK__META_MIGRATE_SOURCE and PCMK__META_MIGRATE_TARGET always
* for migrate ops.
*/
const char *name = PCMK__META_MIGRATE_SOURCE;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
name = PCMK__META_MIGRATE_TARGET;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
}
add_op_digest_to_xml(op, xml_op);
if (op_id_additional) {
free(op_id);
op_id = op_id_additional;
op_id_additional = NULL;
goto again;
}
if (local_user_data) {
free(local_user_data);
op->user_data = NULL;
}
free(magic);
free(op_id);
free(key);
return xml_op;
}
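/* For illustration (all values hypothetical): a successful non-recurring
 * start for resource "rsc1" would be recorded roughly as
 *
 *   <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0"
 *               operation="start" call-id="5" rc-code="0" op-status="0"
 *               interval="0" op-digest="..."/>
 *
 * while a failure would additionally update an "rsc1_last_failure_0" entry.
 */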
/*!
* \internal
* \brief Check whether an action shutdown-locks a resource to a node
*
* If the PCMK_OPT_SHUTDOWN_LOCK cluster property is set, resources will not be
* recovered on a different node if cleanly stopped, and may start only on that
* same node. This function checks whether that applies to a given action, so
* that the transition graph can be marked appropriately.
*
* \param[in] action Action to check
*
* \return true if \p action locks its resource to the action's node,
* otherwise false
*/
bool
pcmk__action_locks_rsc_to_node(const pcmk_action_t *action)
{
// Only resource actions taking place on the resource's lock node are locked
if ((action == NULL) || (action->rsc == NULL)
|| !pcmk__same_node(action->node, action->rsc->priv->lock_node)) {
return false;
}
/* During shutdown, only stops are locked (otherwise, another action such as
* a demote would cause the controller to clear the lock)
*/
if (action->node->details->shutdown && (action->task != NULL)
&& (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
return false;
}
return true;
}
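/* Shutdown locks are enabled via the cluster option, for example (value
 * hypothetical):
 *
 *   crm_attribute --type crm_config --name shutdown-lock --update true
 *
 * With that set, a clean stop for a node shutdown locks the resource to that
 * node, and this function identifies the actions carrying such a lock so the
 * transition graph can be marked accordingly.
 */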
/* lowest to highest */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
const pcmk__related_action_t *action_wrapper2 = a;
const pcmk__related_action_t *action_wrapper1 = b;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (action_wrapper1->action->id < action_wrapper2->action->id) {
return 1;
}
if (action_wrapper1->action->id > action_wrapper2->action->id) {
return -1;
}
return 0;
}
/*!
* \internal
* \brief Remove any duplicate action inputs, merging action flags
*
* \param[in,out] action Action whose inputs should be checked
*/
void
pcmk__deduplicate_action_inputs(pcmk_action_t *action)
{
GList *item = NULL;
GList *next = NULL;
pcmk__related_action_t *last_input = NULL;
action->actions_before = g_list_sort(action->actions_before,
sort_action_id);
for (item = action->actions_before; item != NULL; item = next) {
pcmk__related_action_t *input = item->data;
next = item->next;
if ((last_input != NULL)
&& (input->action->id == last_input->action->id)) {
crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
input->action->uuid, input->action->id,
action->uuid, action->id);
/* For the purposes of scheduling, the ordering flags no longer
* matter, but crm_simulate looks at certain ones when creating a
* dot graph. Combining the flags is sufficient for that purpose.
*/
pcmk__set_relation_flags(last_input->flags, input->flags);
if (input->graphed) {
last_input->graphed = true;
}
free(item->data);
action->actions_before = g_list_delete_link(action->actions_before,
item);
} else {
last_input = input;
input->graphed = false;
}
}
}
/*!
* \internal
* \brief Output all scheduled actions
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__output_actions(pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv->out;
// Output node (non-resource) actions
for (GList *iter = scheduler->priv->actions;
iter != NULL; iter = iter->next) {
char *node_name = NULL;
char *task = NULL;
pcmk_action_t *action = (pcmk_action_t *) iter->data;
if (action->rsc != NULL) {
continue; // Resource actions will be output later
} else if (pcmk_is_set(action->flags, pcmk__action_optional)) {
continue; // This action was not scheduled
}
if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
pcmk__str_none)) {
task = strdup("Shutdown");
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
pcmk__str_none)) {
const char *op = g_hash_table_lookup(action->meta,
PCMK__META_STONITH_ACTION);
task = crm_strdup_printf("Fence (%s)", op);
} else {
continue; // Don't display other node action types
}
if (pcmk__is_guest_or_bundle_node(action->node)) {
const pcmk_resource_t *remote = action->node->priv->remote;
node_name = crm_strdup_printf("%s (resource: %s)",
pcmk__node_name(action->node),
remote->priv->launcher->id);
} else if (action->node != NULL) {
node_name = crm_strdup_printf("%s", pcmk__node_name(action->node));
}
out->message(out, "node-action", task, node_name, action->reason);
free(node_name);
free(task);
}
// Output resource actions
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->priv->cmds->output_actions(rsc);
}
}
/*!
* \internal
* \brief Get action name needed to compare digest for configuration changes
*
* \param[in] task Action name from history
* \param[in] interval_ms Action interval (in milliseconds)
*
* \return Action name whose digest should be compared
*/
static const char *
task_for_digest(const char *task, guint interval_ms)
{
/* Certain actions need to be compared against the parameters used to start
* the resource.
*/
if ((interval_ms == 0)
&& pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
PCMK_ACTION_PROMOTE, NULL)) {
task = PCMK_ACTION_START;
}
return task;
}
/*!
* \internal
* \brief Check whether only sanitized parameters to an action changed
*
* When collecting CIB files for troubleshooting, crm_report will mask
* sensitive resource parameters. If simulations were run using those sanitized
* files, affected resources would appear to need a restart, which would
* complicate troubleshooting. To avoid that, we save a "secure digest" of
* non-sensitive parameters. This function uses that digest to check whether
* only masked parameters are different.
*
* \param[in] xml_op Resource history entry with secure digest
* \param[in] digest_data Operation digest information being compared
* \param[in] scheduler Scheduler data
*
* \return true if only sanitized parameters changed, otherwise false
*/
static bool
only_sanitized_changed(const xmlNode *xml_op,
const pcmk__op_digest_t *digest_data,
const pcmk_scheduler_t *scheduler)
{
const char *digest_secure = NULL;
if (!pcmk_is_set(scheduler->flags, pcmk__sched_sanitized)) {
// The scheduler is not being run as a simulation
return false;
}
digest_secure = crm_element_value(xml_op, PCMK__XA_OP_SECURE_DIGEST);
return (digest_data->rc != pcmk__digest_match) && (digest_secure != NULL)
&& (digest_data->digest_secure_calc != NULL)
&& (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
}
/*!
* \internal
* \brief Force a restart due to a configuration change
*
* \param[in,out] rsc Resource that action is for
* \param[in] task Name of action whose configuration changed
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in,out] node Node where resource should be restarted
*/
static void
force_restart(pcmk_resource_t *rsc, const char *task, guint interval_ms,
pcmk_node_t *node)
{
char *key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk_action_t *required = custom_action(rsc, key, task, NULL, FALSE,
rsc->priv->scheduler);
pe_action_set_reason(required, "resource definition change", true);
trigger_unfencing(rsc, node, "Device parameters changed", NULL,
rsc->priv->scheduler);
}
/*!
* \internal
* \brief Schedule a reload of a resource on a node
*
* \param[in,out] data Resource to reload
* \param[in] user_data Where resource should be reloaded
*/
static void
schedule_reload(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
pcmk_action_t *reload = NULL;
// For collective resources, just call recursively for children
if (rsc->priv->variant > pcmk__rsc_variant_primitive) {
g_list_foreach(rsc->priv->children, schedule_reload, user_data);
return;
}
// Skip the reload in certain situations
if ((node == NULL)
|| !pcmk_is_set(rsc->flags, pcmk__rsc_managed)
|| pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
pcmk__rsc_trace(rsc, "Skip reload of %s:%s%s %s",
rsc->id,
pcmk_is_set(rsc->flags, pcmk__rsc_managed)? "" : " unmanaged",
pcmk_is_set(rsc->flags, pcmk__rsc_failed)? " failed" : "",
(node == NULL)? "inactive" : node->priv->name);
return;
}
/* If a resource's configuration changed while a start was pending,
* force a full restart instead of a reload.
*/
if (pcmk_is_set(rsc->flags, pcmk__rsc_start_pending)) {
pcmk__rsc_trace(rsc,
"%s: preventing agent reload because start pending",
rsc->id);
custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->priv->scheduler);
return;
}
// Schedule the reload
pcmk__set_rsc_flags(rsc, pcmk__rsc_reload);
reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
FALSE, rsc->priv->scheduler);
pe_action_set_reason(reload, "resource definition change", FALSE);
// Set orderings so that a required stop or demote cancels the reload
pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->priv->scheduler);
pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->priv->scheduler);
}
/*!
* \internal
* \brief Handle any configuration change for an action
*
* Given an action from resource history, if the resource's configuration
* changed since the action was done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, etc.).
*
* \param[in,out] rsc Resource that action is for
* \param[in,out] node Node that action was on
* \param[in] xml_op Action XML from resource history
*
* \return true if action configuration changed, otherwise false
*/
bool
pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op)
{
guint interval_ms = 0;
const char *task = NULL;
const pcmk__op_digest_t *digest_data = NULL;
CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL),
return false);
task = crm_element_value(xml_op, PCMK_XA_OPERATION);
CRM_CHECK(task != NULL, return false);
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &interval_ms);
// If this is a recurring action, check whether it has been orphaned
if (interval_ms > 0) {
if (pcmk__find_action_config(rsc, task, interval_ms, false) != NULL) {
pcmk__rsc_trace(rsc,
"%s-interval %s for %s on %s is in configuration",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
} else if (pcmk_is_set(rsc->priv->scheduler->flags,
pcmk__sched_cancel_removed_actions)) {
pcmk__schedule_cancel(rsc,
crm_element_value(xml_op, PCMK__XA_CALL_ID),
task, interval_ms, node, "orphan");
return true;
} else {
pcmk__rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
return true;
}
}
crm_trace("Checking %s-interval %s for %s on %s for configuration changes",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
task = task_for_digest(task, interval_ms);
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->priv->scheduler);
if (only_sanitized_changed(xml_op, digest_data, rsc->priv->scheduler)) {
if (!pcmk__is_daemon && (rsc->priv->scheduler->priv->out != NULL)) {
pcmk__output_t *out = rsc->priv->scheduler->priv->out;
out->info(out,
"Only 'private' parameters to %s-interval %s for %s "
"on %s changed: %s",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node),
crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC));
}
return false;
}
switch (digest_data->rc) {
case pcmk__digest_restart:
crm_log_xml_debug(digest_data->params_restart, "params:restart");
force_restart(rsc, task, interval_ms, node);
return true;
case pcmk__digest_unknown:
case pcmk__digest_mismatch:
// Changes that can potentially be handled by an agent reload
if (interval_ms > 0) {
/* Recurring actions aren't reloaded per se; they are just
* rescheduled so that the next run uses the new parameters.
* The old instance will be cancelled automatically.
*/
crm_log_xml_debug(digest_data->params_all, "params:reschedule");
pcmk__reschedule_recurring(rsc, task, interval_ms, node);
} else if (crm_element_value(xml_op,
PCMK__XA_OP_RESTART_DIGEST) != NULL) {
// Agent supports reload, so use it
trigger_unfencing(rsc, node,
"Device parameters changed (reload)", NULL,
rsc->priv->scheduler);
crm_log_xml_debug(digest_data->params_all, "params:reload");
schedule_reload((gpointer) rsc, (gpointer) node);
} else {
pcmk__rsc_trace(rsc,
"Restarting %s "
"because agent doesn't support reload",
rsc->id);
crm_log_xml_debug(digest_data->params_restart,
"params:restart");
force_restart(rsc, task, interval_ms, node);
}
return true;
default:
break;
}
return false;
}
/*!
* \internal
* \brief Create a sorted list of a resource's action history entries
*
* \param[in] rsc_entry Resource's \c PCMK__XE_LRM_RESOURCE status XML
* \param[out] start_index Where to store index of start-like action, if any
* \param[out] stop_index Where to store index of stop action, if any
*
* \return Newly created list of \c PCMK__XE_LRM_RSC_OP entries, sorted by call ID
*/
static GList *
rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
{
GList *ops = NULL;
for (xmlNode *rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP,
NULL, NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) {
ops = g_list_prepend(ops, rsc_op);
}
ops = g_list_sort(ops, sort_op_by_callid);
calculate_active_ops(ops, start_index, stop_index);
return ops;
}
/*!
* \internal
* \brief Process a resource's action history from the CIB status
*
* Given a resource's action history, if the resource's configuration
* changed since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in] rsc_entry Resource's \c PCMK__XE_LRM_RESOURCE status XML
* \param[in,out] rsc Resource whose history is being processed
* \param[in,out] node Node whose history is being processed
*/
static void
process_rsc_history(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
pcmk_node_t *node)
{
int offset = -1;
int stop_index = 0;
int start_index = 0;
GList *sorted_op_list = NULL;
if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
if (pcmk__is_anonymous_clone(pe__const_top_resource(rsc, false))) {
pcmk__rsc_trace(rsc,
"Skipping configuration check "
"for orphaned clone instance %s",
rsc->id);
} else {
pcmk__rsc_trace(rsc,
"Skipping configuration check and scheduling "
"clean-up for orphaned resource %s", rsc->id);
pcmk__schedule_cleanup(rsc, node, false);
}
return;
}
if (pe_find_node_id(rsc->priv->active_nodes,
node->priv->id) == NULL) {
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) {
pcmk__schedule_cleanup(rsc, node, false);
}
pcmk__rsc_trace(rsc,
"Skipping configuration check for %s "
"because no longer active on %s",
rsc->id, pcmk__node_name(node));
return;
}
pcmk__rsc_trace(rsc, "Checking for configuration changes for %s on %s",
rsc->id, pcmk__node_name(node));
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) {
pcmk__schedule_cleanup(rsc, node, false);
}
sorted_op_list = rsc_history_as_list(rsc_entry, &start_index, &stop_index);
if (start_index < stop_index) {
return; // Resource is stopped
}
for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
xmlNode *rsc_op = (xmlNode *) iter->data;
const char *task = NULL;
guint interval_ms = 0;
if (++offset < start_index) {
// Skip actions that happened before a start
continue;
}
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
if ((interval_ms > 0)
&& (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)
|| node->details->maintenance)) {
// Maintenance mode cancels recurring operations
pcmk__schedule_cancel(rsc,
crm_element_value(rsc_op, PCMK__XA_CALL_ID),
task, interval_ms, node, "maintenance mode");
} else if ((interval_ms > 0)
|| pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START,
PCMK_ACTION_PROMOTE,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* If a resource operation failed, and the operation's definition
* has changed, clear any fail count so the operation can be retried fresh.
*/
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't assigned resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
- pe__add_param_check(rsc_op, rsc, node, pcmk__check_active,
- rsc->priv->scheduler);
+ pcmk__add_param_check(rsc_op, rsc, node, pcmk__check_active,
+ rsc->priv->scheduler);
} else if (pcmk__check_action_config(rsc, node, rsc_op)
&& (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL) != 0)) {
pe__clear_failcount(rsc, node, "action definition changed",
rsc->priv->scheduler);
}
}
}
g_list_free(sorted_op_list);
}
/*!
* \internal
* \brief Process a node's action history from the CIB status
*
* Given a node's resource history, if the resource's configuration changed
* since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] node Node whose history is being processed
* \param[in] lrm_rscs Node's \c PCMK__XE_LRM_RESOURCES from CIB status XML
*/
static void
process_node_history(pcmk_node_t *node, const xmlNode *lrm_rscs)
{
crm_trace("Processing node history for %s", pcmk__node_name(node));
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rscs,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL;
rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
if (rsc_entry->children != NULL) {
GList *result = pcmk__rscs_matching_id(pcmk__xe_id(rsc_entry),
node->priv->scheduler);
for (GList *iter = result; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (pcmk__is_primitive(rsc)) {
process_rsc_history(rsc_entry, rsc, node);
}
}
g_list_free(result);
}
}
}
// XPath to find a node's resource history
#define XPATH_NODE_HISTORY "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE \
"[@" PCMK_XA_UNAME "='%s']" \
"/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES
/*!
* \internal
* \brief Process any resource configuration changes in the CIB status
*
* Go through all nodes' resource history, and if a resource's configuration
* changed since its actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
{
crm_trace("Check resource and action configuration for changes");
/* Rather than iterate through the status section, iterate through the nodes
* and search for the appropriate status subsection for each. This skips
* orphaned nodes and lets us eliminate some cases before searching the XML.
*/
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* Don't bother checking actions for a node that can't run actions ...
* unless it's in maintenance mode, in which case we still need to
* cancel any existing recurring monitors.
*/
if (node->details->maintenance
|| pcmk__node_available(node, false, false)) {
char *xpath = NULL;
xmlNode *history = NULL;
xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->priv->name);
history = get_xpath_object(xpath, scheduler->input, LOG_NEVER);
free(xpath);
process_node_history(node, history);
}
}
}
diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c
index 8960c298f5..659fbbbfbe 100644
--- a/lib/pacemaker/pcmk_scheduler.c
+++ b/lib/pacemaker/pcmk_scheduler.c
@@ -1,880 +1,880 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/scheduler_internal.h>
#include <glib.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
CRM_TRACE_INIT_DATA(pacemaker);
/*!
* \internal
* \brief Do deferred action checks after assignment
*
* When unpacking the resource history, the scheduler checks for resource
* configurations that have changed since an action was run. However, at that
* time, bundles using the REMOTE_CONTAINER_HACK don't have their final
* parameter information, so instead they add a deferred check to a list. This
* function processes one entry in that list.
*
* \param[in,out] rsc Resource that action history is for
* \param[in,out] node Node that action history is for
* \param[in] rsc_op Action history entry
* \param[in] check Type of deferred check to do
*/
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
enum pcmk__check_parameters check)
{
const char *reason = NULL;
pcmk__op_digest_t *digest_data = NULL;
switch (check) {
case pcmk__check_active:
if (pcmk__check_action_config(rsc, node, rsc_op)
&& pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL)) {
reason = "action definition changed";
}
break;
case pcmk__check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
rsc->priv->scheduler);
switch (digest_data->rc) {
case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s has "
"no digest to compare",
rsc->id, pcmk__xe_id(rsc_op), node->priv->id);
break;
case pcmk__digest_match:
break;
default:
reason = "resource parameters have changed";
break;
}
break;
}
if (reason != NULL) {
pe__clear_failcount(rsc, node, reason, rsc->priv->scheduler);
}
}
/*!
* \internal
* \brief Check whether a resource has failcount clearing scheduled on a node
*
* \param[in] node Node to check
* \param[in] rsc Resource to check
*
* \return true if \p rsc has failcount clearing scheduled on \p node,
* otherwise false
*/
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
const pcmk_resource_t *rsc)
{
GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
TRUE);
if (list != NULL) {
g_list_free(list);
return true;
}
return false;
}
/*!
* \internal
* \brief Ban a resource from a node if it reached its failure threshold there
*
* \param[in,out] data Resource to check failure threshold for
* \param[in] user_data Node to check resource on
*/
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
// If this is a collective resource, apply recursively to children instead
if (rsc->priv->children != NULL) {
g_list_foreach(rsc->priv->children, check_failure_threshold,
user_data);
return;
}
if (!failcount_clear_action_exists(node, rsc)) {
/* Don't force the resource away from this node due to a failcount
* that's going to be cleared.
*
* @TODO Failcount clearing can be scheduled in
* pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
* schedule_resource_actions() via check_params(). This runs well before
* then, so it cannot detect those, meaning we might check the migration
* threshold when we shouldn't. Worst case, we stop or move the
* resource, then move it back in the next transition.
*/
pcmk_resource_t *failed = NULL;
if (pcmk__threshold_reached(rsc, node, &failed)) {
resource_location(failed, node, -PCMK_SCORE_INFINITY,
"__fail_limit__", rsc->priv->scheduler);
}
}
}
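/* Example of the threshold being applied (hypothetical configuration): with
 *
 *   <nvpair id="rsc1-meta-mt" name="migration-threshold" value="3"/>
 *
 * in a resource's meta-attributes, a third failure on a node results in a
 * -INFINITY location score for that node until the fail count is cleared.
 */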
/*!
* \internal
* \brief If resource has exclusive discovery, ban node if not allowed
*
* Location constraints have a PCMK_XA_RESOURCE_DISCOVERY option that allows
* users to specify where probes are done for the affected resource. If this is
* set to \c exclusive, probes will only be done on nodes listed in exclusive
* constraints. This function bans the resource from the node if the node is not
* listed.
*
* \param[in,out] data Resource to check
* \param[in] user_data Node to check resource on
*/
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)
|| pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pcmk__rsc_exclusive_probes)) {
pcmk_node_t *match = NULL;
// If this is a collective resource, apply recursively to children
g_list_foreach(rsc->priv->children, apply_exclusive_discovery,
user_data);
match = g_hash_table_lookup(rsc->priv->allowed_nodes,
node->priv->id);
if ((match != NULL)
&& (match->assign->probe_mode != pcmk__probe_exclusive)) {
match->assign->score = -PCMK_SCORE_INFINITY;
}
}
}
/*!
* \internal
* \brief Apply stickiness to a resource if appropriate
*
* \param[in,out] data Resource to check for stickiness
* \param[in] user_data Ignored
*/
static void
apply_stickiness(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
pcmk_node_t *node = NULL;
// If this is a collective resource, apply recursively to children instead
if (rsc->priv->children != NULL) {
g_list_foreach(rsc->priv->children, apply_stickiness, NULL);
return;
}
/* A resource is sticky if it is managed, has stickiness configured, and is
* active on a single node.
*/
if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)
|| (rsc->priv->stickiness < 1)
|| !pcmk__list_of_1(rsc->priv->active_nodes)) {
return;
}
node = rsc->priv->active_nodes->data;
/* In a symmetric cluster, stickiness can always be used. In an
* asymmetric cluster, we have to check whether the resource is still
* allowed on the node, so we don't keep the resource somewhere it is no
* longer explicitly enabled.
*/
if (!pcmk_is_set(rsc->priv->scheduler->flags,
pcmk__sched_symmetric_cluster)
&& (g_hash_table_lookup(rsc->priv->allowed_nodes,
node->priv->id) == NULL)) {
pcmk__rsc_debug(rsc,
"Ignoring %s stickiness because the cluster is "
"asymmetric and %s is not explicitly allowed",
rsc->id, pcmk__node_name(node));
return;
}
pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
rsc->id, rsc->priv->stickiness, pcmk__node_name(node));
resource_location(rsc, node, rsc->priv->stickiness, "stickiness",
rsc->priv->scheduler);
}
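/* Example (hypothetical values): a managed resource with
 * resource-stickiness=100 that is active only on node1 effectively gains a
 * +100 location preference for node1, biasing assignment toward leaving the
 * resource where it is.
 */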
/*!
* \internal
* \brief Apply shutdown locks for all resources as appropriate
*
* \param[in,out] scheduler Scheduler data
*/
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
if (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
return;
}
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->priv->cmds->shutdown_lock(rsc);
}
}
/*!
* \internal
* \brief Apply node-specific scheduling criteria
*
* After the CIB has been unpacked, process node-specific scheduling criteria
* including shutdown locks, location constraints, resource stickiness,
* migration thresholds, and exclusive resource discovery.
*
* \param[in,out] scheduler Scheduler data
*/
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
crm_trace("Applying node-specific scheduling criteria");
apply_shutdown_locks(scheduler);
pcmk__apply_locations(scheduler);
g_list_foreach(scheduler->priv->resources, apply_stickiness, NULL);
for (GList *node_iter = scheduler->nodes; node_iter != NULL;
node_iter = node_iter->next) {
for (GList *rsc_iter = scheduler->priv->resources;
rsc_iter != NULL; rsc_iter = rsc_iter->next) {
check_failure_threshold(rsc_iter->data, node_iter->data);
apply_exclusive_discovery(rsc_iter->data, node_iter->data);
}
}
}
/*!
* \internal
* \brief Assign resources to nodes
*
* \param[in,out] scheduler Scheduler data
*/
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
crm_trace("Assigning resources to nodes");
if (!pcmk__str_eq(scheduler->priv->placement_strategy, PCMK_VALUE_DEFAULT,
pcmk__str_casei)) {
pcmk__sort_resources(scheduler);
}
pcmk__show_node_capacities("Original", scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_have_remote_nodes)) {
/* Assign remote connection resources first (which will also assign any
* colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
for (iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
const pcmk_node_t *target = rsc->priv->partial_migration_target;
if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
rsc->id);
rsc->priv->cmds->assign(rsc, target, true);
}
}
}
/* now do the rest of the resources */
for (iter = scheduler->priv->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (!pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
rsc->priv->xml->name, rsc->id);
rsc->priv->cmds->assign(rsc, NULL, true);
}
}
pcmk__show_node_capacities("Remaining", scheduler);
}
/*!
* \internal
* \brief Schedule fail count clearing on online nodes if resource is orphaned
*
* \param[in,out] data Resource to check
* \param[in] user_data Ignored
*/
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
return;
}
crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
/* There's no need to recurse into rsc->priv->children because those
* should just be unassigned clone instances.
*/
for (GList *iter = rsc->priv->scheduler->nodes;
iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
pcmk_action_t *clear_op = NULL;
if (!node->details->online) {
continue;
}
if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
continue;
}
clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
rsc->priv->scheduler);
/* We can't use order_action_then_stop() here because its
* pcmk__ar_guest_allowed breaks things
*/
pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
NULL, pcmk__ar_ordered, rsc->priv->scheduler);
}
}
/*!
* \internal
* \brief Schedule any resource actions needed
*
* \param[in,out] scheduler Scheduler data
*/
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
// Process deferred action checks
- pe__foreach_param_check(scheduler, check_params);
- pe__free_param_checks(scheduler);
+ pcmk__foreach_param_check(scheduler, check_params);
+ pcmk__free_param_checks(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_probe_resources)) {
crm_trace("Scheduling probes");
pcmk__schedule_probes(scheduler);
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) {
g_list_foreach(scheduler->priv->resources, clear_failcounts_if_orphaned,
NULL);
}
crm_trace("Scheduling resource actions");
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->priv->cmds->create_actions(rsc);
}
}
/*!
* \internal
* \brief Check whether a resource or any of its descendants are managed
*
* \param[in] rsc Resource to check
*
* \return true if resource or any descendant is managed, otherwise false
*/
static bool
is_managed(const pcmk_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
return true;
}
for (GList *iter = rsc->priv->children;
iter != NULL; iter = iter->next) {
if (is_managed((pcmk_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether any resources in the cluster are managed
*
* \param[in] scheduler Scheduler data
*
* \return true if any resource is managed, otherwise false
*/
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
for (const GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
if (is_managed((const pcmk_resource_t *) iter->data)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether a node requires fencing
*
* \param[in] node Node to check
* \param[in] have_managed Whether any resource in cluster is managed
*
* \return true if \p node should be fenced, otherwise false
*/
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
return have_managed && node->details->unclean
&& pe_can_fence(node->priv->scheduler, node);
}
/*!
* \internal
* \brief Check whether a node requires shutdown
*
* \param[in] node Node to check
*
* \return true if \p node should be shut down, otherwise false
*/
static bool
needs_shutdown(const pcmk_node_t *node)
{
if (pcmk__is_pacemaker_remote_node(node)) {
/* Do not send shutdown actions for Pacemaker Remote nodes.
* @TODO We might come up with a good use for this in the future.
*/
return false;
}
return node->details->online && node->details->shutdown;
}
/*!
* \internal
* \brief Track and order non-DC fencing
*
* \param[in,out] list List of existing non-DC fencing actions
* \param[in,out] action Fencing action to prepend to \p list
* \param[in] scheduler Scheduler data
*
* \return (Possibly new) head of \p list
*/
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
const pcmk_scheduler_t *scheduler)
{
if (!pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)
&& (list != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing action in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
}
return g_list_prepend(list, action);
}
/*!
* \internal
* \brief Schedule a node for fencing
*
* \param[in,out] node Node that requires fencing
*
* \return Newly created fencing action
*/
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
FALSE, node->priv->scheduler);
pcmk__sched_warn(node->priv->scheduler, "Scheduling node %s for fencing",
pcmk__node_name(node));
pcmk__order_vs_fence(fencing, node->priv->scheduler);
return fencing;
}
/*!
* \internal
* \brief Create and order node fencing and shutdown actions
*
* \param[in,out] scheduler Scheduler data
*/
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
pcmk_action_t *dc_down = NULL;
bool integrity_lost = false;
bool have_managed = any_managed_resources(scheduler);
GList *fencing_ops = NULL;
GList *shutdown_ops = NULL;
crm_trace("Scheduling fencing and shutdowns as needed");
if (!have_managed) {
crm_notice("No fencing will be done until there are resources "
"to manage");
}
// Check each node for whether it needs fencing or shutdown
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
pcmk_action_t *fencing = NULL;
const bool is_dc = pcmk__same_node(node, scheduler->dc_node);
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pcmk__is_guest_or_bundle_node(node)) {
if (pcmk_is_set(node->priv->flags, pcmk__node_remote_reset)
&& have_managed && pe_can_fence(scheduler, node)) {
pcmk__fence_guest(node);
}
continue;
}
if (needs_fencing(node, have_managed)) {
fencing = schedule_fencing(node);
// Track DC and non-DC fence actions separately
if (is_dc) {
dc_down = fencing;
} else {
fencing_ops = add_nondc_fencing(fencing_ops, fencing,
scheduler);
}
} else if (needs_shutdown(node)) {
pcmk_action_t *down_op = pcmk__new_shutdown_action(node);
// Track DC and non-DC shutdown actions separately
if (is_dc) {
dc_down = down_op;
} else {
shutdown_ops = g_list_prepend(shutdown_ops, down_op);
}
}
if ((fencing == NULL) && node->details->unclean) {
integrity_lost = true;
pcmk__config_warn("Node %s is unclean but cannot be fenced",
pcmk__node_name(node));
}
}
if (integrity_lost) {
if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
pcmk__config_warn("Resource functionality and data integrity "
"cannot be guaranteed (configure, enable, "
"and test fencing to correct this)");
} else if (!pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) {
crm_notice("Unclean nodes will not be fenced until quorum is "
"attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
PCMK_VALUE_IGNORE);
}
}
if (dc_down != NULL) {
/* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
* DC elections. However, we don't want to order non-DC shutdowns before
* a DC *fencing*, because even though we don't want a node that's
* shutting down to become DC, the DC fencing could be ordered before a
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
pcmk__str_none)) {
pcmk__order_after_each(dc_down, shutdown_ops);
}
// Order any non-DC fencing before any DC fencing or shutdown
if (pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
pcmk__order_after_each(dc_down, fencing_ops);
} else if (fencing_ops != NULL) {
/* Without concurrent fencing, the non-DC fencing actions are
* already ordered relative to each other, so we just need to order
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
pcmk__ar_ordered);
}
}
g_list_free(fencing_ops);
g_list_free(shutdown_ops);
}
static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv->out;
GList *all = NULL;
/* Due to the `crm_mon --node=` feature, out->message() for all the
* resource-related messages expects a list of nodes that we are allowed to
* output information for. Here, we create a wildcard to match all nodes.
*/
all = g_list_prepend(all, (gpointer) "*");
for (GList *item = scheduler->priv->resources;
item != NULL; item = item->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;
// Log all resources except inactive orphans
if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)
|| (rsc->priv->orig_role != pcmk_role_stopped)) {
out->message(out, (const char *) rsc->priv->xml->name, 0UL,
rsc, all, all);
}
}
g_list_free(all);
}
static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
/* This only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
pcmk__output_t *prev_out = scheduler->priv->out;
pcmk__output_t *out = NULL;
if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
return;
}
pe__register_messages(out);
pcmk__register_lib_messages(out);
pcmk__output_set_log_level(out, LOG_NOTICE);
scheduler->priv->out = out;
out->begin_list(out, NULL, NULL, "Actions");
pcmk__output_actions(scheduler);
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
scheduler->priv->out = prev_out;
}
/*!
* \internal
* \brief Log all required but unrunnable actions at trace level
*
* \param[in] scheduler Scheduler data
*/
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
const uint64_t flags = pcmk__action_optional
|pcmk__action_runnable
|pcmk__action_pseudo;
crm_trace("Required but unrunnable actions:");
for (const GList *iter = scheduler->priv->actions;
iter != NULL; iter = iter->next) {
const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
if (!pcmk_any_flags_set(action->flags, flags)) {
pcmk__log_action("\t", action, true);
}
}
}
/*!
* \internal
* \brief Unpack the CIB for scheduling
*
* \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
if (pcmk_is_set(scheduler->flags, pcmk__sched_have_status)) {
crm_trace("Reusing previously calculated cluster status");
pcmk__set_scheduler_flags(scheduler, flags);
return;
}
pcmk__assert(cib != NULL);
crm_trace("Calculating cluster status");
/* This will zero the entire struct without freeing anything first, so
* callers should never call pcmk__schedule_actions() with a populated data
* set unless pcmk__sched_have_status is set (i.e. cluster_status() was
* previously called, whether directly or via pcmk__schedule_actions()).
*/
set_working_set_defaults(scheduler);
pcmk__set_scheduler_flags(scheduler, flags);
scheduler->input = cib;
cluster_status(scheduler); // Sets pcmk__sched_have_status
}
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
* \param[in,out] cib CIB XML to use as scheduler input
* \param[in] flags Scheduler flags to set in addition to defaults
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
pcmk_scheduler_t *scheduler)
{
unpack_cib(cib, flags, scheduler);
pcmk__set_assignment_methods(scheduler);
pcmk__apply_node_health(scheduler);
pcmk__unpack_constraints(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_validate_only)) {
return;
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)
&& pcmk__is_daemon) {
log_resource_details(scheduler);
}
apply_node_criteria(scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
return;
}
pcmk__create_internal_constraints(scheduler);
pcmk__handle_rsc_config_changes(scheduler);
assign_resources(scheduler);
schedule_resource_actions(scheduler);
/* Remote ordering constraints need to happen prior to calculating fencing
* because it is one more place we can mark nodes as needing fencing.
*/
pcmk__order_remote_connection_actions(scheduler);
schedule_fencing_and_shutdowns(scheduler);
pcmk__apply_orderings(scheduler);
log_all_actions(scheduler);
pcmk__create_graph(scheduler);
if (get_crm_log_level() == LOG_TRACE) {
log_unrunnable_actions(scheduler);
}
}
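/* Illustrative usage sketch (not part of this change, assumes normal library
 * initialization): a caller that owns a freshly queried CIB might run a full
 * scheduling pass like this. Note that the scheduler takes over the CIB XML,
 * so the caller must not free it separately:
 *
 *     pcmk_scheduler_t *scheduler = pe_new_working_set();
 *
 *     if (scheduler != NULL) {
 *         pcmk__schedule_actions(cib_xml, pcmk__sched_no_counts, scheduler);
 *         // The transition graph is expected in scheduler->priv->graph
 *         pe_free_working_set(scheduler); // Also frees cib_xml
 *     }
 */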
/*!
* \internal
* \brief Initialize scheduler data
*
* Make our own copies of the CIB XML and date/time object, if they're not
* \c NULL. This way we don't have to take ownership of the objects passed via
* the API.
*
* This function is most useful for public API functions that want the caller
* to retain ownership of the CIB object.
*
* \param[in,out] out Output object
* \param[in] input The CIB XML to check (if \c NULL, use current CIB)
* \param[in] date Date and time to use in the scheduler (if \c NULL,
* use current date and time). This can be used for
* checking whether a rule is in effect at a certain
* date and time.
* \param[out] scheduler Where to store initialized scheduler data
*
* \return Standard Pacemaker return code
*/
int
pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
pcmk_scheduler_t **scheduler)
{
// Allows for cleaner syntax than dereferencing the scheduler argument
pcmk_scheduler_t *new_scheduler = NULL;
new_scheduler = pe_new_working_set();
if (new_scheduler == NULL) {
return ENOMEM;
}
pcmk__set_scheduler_flags(new_scheduler, pcmk__sched_no_counts);
// Populate the scheduler data
// Make our own copy of the given input or fetch the CIB and use that
if (input != NULL) {
new_scheduler->input = pcmk__xml_copy(NULL, input);
if (new_scheduler->input == NULL) {
out->err(out, "Failed to copy input XML");
pe_free_working_set(new_scheduler);
return ENOMEM;
}
} else {
int rc = cib__signon_query(out, NULL, &(new_scheduler->input));
if (rc != pcmk_rc_ok) {
pe_free_working_set(new_scheduler);
return rc;
}
}
// Make our own copy of the given crm_time_t object; otherwise
// cluster_status() populates with the current time
if (date != NULL) {
// pcmk_copy_time() guarantees non-NULL
new_scheduler->priv->now = pcmk_copy_time(date);
}
// Unpack everything
cluster_status(new_scheduler);
*scheduler = new_scheduler;
return pcmk_rc_ok;
}
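/* Illustrative usage sketch (not part of this change): unlike
 * pcmk__schedule_actions(), this function copies the given CIB, so a public
 * API caller keeps ownership of its own XML:
 *
 *     pcmk_scheduler_t *scheduler = NULL;
 *     int rc = pcmk__init_scheduler(out, cib_xml, NULL, &scheduler);
 *
 *     if (rc == pcmk_rc_ok) {
 *         // ... inspect scheduler->nodes, scheduler->priv->resources, etc.
 *         pe_free_working_set(scheduler);
 *     }
 *     pcmk__xml_free(cib_xml); // Still owned by the caller
 */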
diff --git a/lib/pengine/remote.c b/lib/pengine/remote.c
index 395fa473e6..b3820a742d 100644
--- a/lib/pengine/remote.c
+++ b/lib/pengine/remote.c
@@ -1,242 +1,181 @@
/*
* Copyright 2013-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/common/xml.h>
#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
#include <glib.h>
/*!
* \internal
* \brief Check whether a resource creates a guest node
*
* If a given resource contains a launched resource that is a remote connection,
* return that launched resource (or NULL if none is found).
*
* \param[in] scheduler Scheduler data
* \param[in] rsc Resource to check
*
* \return Launched remote connection, or NULL if none found
*/
pcmk_resource_t *
pe__resource_contains_guest_node(const pcmk_scheduler_t *scheduler,
const pcmk_resource_t *rsc)
{
if ((rsc != NULL) && (scheduler != NULL)
&& pcmk_is_set(scheduler->flags, pcmk__sched_have_remote_nodes)) {
for (GList *gIter = rsc->priv->launched;
gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *launched = gIter->data;
if (pcmk_is_set(launched->flags, pcmk__rsc_is_remote_connection)) {
return launched;
}
}
}
return NULL;
}
bool
xml_contains_remote_node(xmlNode *xml)
{
const char *value = NULL;
if (xml == NULL) {
return false;
}
value = crm_element_value(xml, PCMK_XA_TYPE);
if (!pcmk__str_eq(value, "remote", pcmk__str_casei)) {
return false;
}
value = crm_element_value(xml, PCMK_XA_CLASS);
if (!pcmk__str_eq(value, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) {
return false;
}
value = crm_element_value(xml, PCMK_XA_PROVIDER);
if (!pcmk__str_eq(value, "pacemaker", pcmk__str_casei)) {
return false;
}
return true;
}
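/* For reference, this checks only the three attributes, so it matches any
 * element shaped like this illustrative primitive:
 *
 *     <primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
 */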
/*!
* \internal
* \brief Execute a supplied function for each guest node running on a host
*
* \param[in] scheduler Scheduler data
* \param[in] host Host node to check
* \param[in] helper Function to call for each guest node
* \param[in,out] user_data Pointer to pass to helper function
*/
void
pe_foreach_guest_node(const pcmk_scheduler_t *scheduler,
const pcmk_node_t *host,
void (*helper)(const pcmk_node_t*, void*),
void *user_data)
{
GList *iter;
CRM_CHECK(scheduler && host && host->details && helper, return);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_have_remote_nodes)) {
return;
}
for (iter = host->details->running_rsc; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)
&& (rsc->priv->launcher != NULL)) {
pcmk_node_t *guest_node = pcmk_find_node(scheduler, rsc->id);
if (guest_node) {
(*helper)(guest_node, user_data);
}
}
}
}
/*!
* \internal
* \brief Create CIB XML for an implicit remote connection
*
* \param[in,out] parent If not \c NULL, use as parent XML element
* \param[in] uname Name of Pacemaker Remote node
* \param[in] container_id If not \c NULL, use this as connection container
* \param[in] migrateable If not \c NULL, use as remote
* \c PCMK_META_ALLOW_MIGRATE value
* \param[in] is_managed If not \c NULL, use as remote
* \c PCMK_META_IS_MANAGED value
* \param[in] start_timeout If not \c NULL, use as remote connect timeout
* \param[in] server If not \c NULL, use as \c PCMK_REMOTE_RA_ADDR
* \param[in] port If not \c NULL, use as \c PCMK_REMOTE_RA_PORT
*
* \return Newly created XML
*/
xmlNode *
pe_create_remote_xml(xmlNode *parent, const char *uname,
const char *container_id, const char *migrateable,
const char *is_managed, const char *start_timeout,
const char *server, const char *port)
{
xmlNode *remote;
xmlNode *xml_sub;
remote = pcmk__xe_create(parent, PCMK_XE_PRIMITIVE);
// Add identity
crm_xml_add(remote, PCMK_XA_ID, uname);
crm_xml_add(remote, PCMK_XA_CLASS, PCMK_RESOURCE_CLASS_OCF);
crm_xml_add(remote, PCMK_XA_PROVIDER, "pacemaker");
crm_xml_add(remote, PCMK_XA_TYPE, "remote");
// Add meta-attributes
xml_sub = pcmk__xe_create(remote, PCMK_XE_META_ATTRIBUTES);
pcmk__xe_set_id(xml_sub, "%s-%s", uname, PCMK_XE_META_ATTRIBUTES);
crm_create_nvpair_xml(xml_sub, NULL,
PCMK__META_INTERNAL_RSC, PCMK_VALUE_TRUE);
if (container_id) {
crm_create_nvpair_xml(xml_sub, NULL,
PCMK__META_CONTAINER, container_id);
}
if (migrateable) {
crm_create_nvpair_xml(xml_sub, NULL,
PCMK_META_ALLOW_MIGRATE, migrateable);
}
if (is_managed) {
crm_create_nvpair_xml(xml_sub, NULL, PCMK_META_IS_MANAGED, is_managed);
}
// Add instance attributes
if (port || server) {
xml_sub = pcmk__xe_create(remote, PCMK_XE_INSTANCE_ATTRIBUTES);
pcmk__xe_set_id(xml_sub, "%s-%s", uname, PCMK_XE_INSTANCE_ATTRIBUTES);
if (server) {
crm_create_nvpair_xml(xml_sub, NULL, PCMK_REMOTE_RA_ADDR, server);
}
if (port) {
crm_create_nvpair_xml(xml_sub, NULL, PCMK_REMOTE_RA_PORT, port);
}
}
// Add operations
xml_sub = pcmk__xe_create(remote, PCMK_XE_OPERATIONS);
crm_create_op_xml(xml_sub, uname, PCMK_ACTION_MONITOR, "30s", "30s");
if (start_timeout) {
crm_create_op_xml(xml_sub, uname, PCMK_ACTION_START, "0",
start_timeout);
}
return remote;
}
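/* For reference, with every argument supplied, the result is shaped roughly
 * like this (illustrative; IDs are derived from the node name):
 *
 *     <primitive id="remote1" class="ocf" provider="pacemaker" type="remote">
 *       <meta_attributes id="remote1-meta_attributes">
 *         <!-- internal, container, allow-migrate, and is-managed nvpairs -->
 *       </meta_attributes>
 *       <instance_attributes id="remote1-instance_attributes">
 *         <!-- addr and port nvpairs -->
 *       </instance_attributes>
 *       <operations>
 *         <!-- a 30s monitor, plus a start with the given timeout -->
 *       </operations>
 *     </primitive>
 */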
-
-// History entry to be checked for fail count clearing
-struct check_op {
- const xmlNode *rsc_op; // History entry XML
- pcmk_resource_t *rsc; // Known resource corresponding to history entry
- pcmk_node_t *node; // Known node corresponding to history entry
- enum pcmk__check_parameters check_type; // What needs checking
-};
-
-void
-pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
- pcmk_node_t *node, enum pcmk__check_parameters flag,
- pcmk_scheduler_t *scheduler)
-{
- struct check_op *check_op = NULL;
-
- CRM_CHECK(scheduler && rsc_op && rsc && node, return);
-
- check_op = pcmk__assert_alloc(1, sizeof(struct check_op));
-
- crm_trace("Deferring checks of %s until after allocation",
- pcmk__xe_id(rsc_op));
- check_op->rsc_op = rsc_op;
- check_op->rsc = rsc;
- check_op->node = node;
- check_op->check_type = flag;
- scheduler->priv->param_check = g_list_prepend(scheduler->priv->param_check,
- check_op);
-}
-
-/*!
- * \internal
- * \brief Call a function for each action to be checked for addr substitution
- *
- * \param[in,out] scheduler Scheduler data
- * \param[in] cb Function to be called
- */
-void
-pe__foreach_param_check(pcmk_scheduler_t *scheduler,
- void (*cb)(pcmk_resource_t*, pcmk_node_t*,
- const xmlNode*, enum pcmk__check_parameters))
-{
- CRM_CHECK(scheduler && cb, return);
-
- for (GList *item = scheduler->priv->param_check;
- item != NULL; item = item->next) {
- struct check_op *check_op = item->data;
-
- cb(check_op->rsc, check_op->node, check_op->rsc_op,
- check_op->check_type);
- }
-}
-
-void
-pe__free_param_checks(pcmk_scheduler_t *scheduler)
-{
- if ((scheduler != NULL) && (scheduler->priv->param_check != NULL)) {
- g_list_free_full(scheduler->priv->param_check, free);
- scheduler->priv->param_check = NULL;
- }
-}
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index 9ce10fdb72..76d583b60d 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,532 +1,532 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/common/xml.h>
#include <crm/common/cib_internal.h>
#include <glib.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
/*!
* \brief Create a new object to hold scheduler data
*
* \return New, initialized scheduler data on success, else NULL (with errno set)
* \note Only pcmk_scheduler_t objects created with this function (as opposed
* to statically declared or directly allocated) should be used with the
* functions in this library, to allow for future extensions to the
* data type. The caller is responsible for freeing the memory with
* pe_free_working_set() when the instance is no longer needed.
*/
pcmk_scheduler_t *
pe_new_working_set(void)
{
pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
if (scheduler == NULL) {
return NULL;
}
scheduler->priv = calloc(1, sizeof(pcmk__scheduler_private_t));
if (scheduler->priv == NULL) {
free(scheduler);
return NULL;
}
pcmk__set_scheduler_defaults(scheduler);
return scheduler;
}
/*!
* \brief Free scheduler data
*
* \param[in,out] scheduler Scheduler data to free
*/
void
pe_free_working_set(pcmk_scheduler_t *scheduler)
{
if (scheduler != NULL) {
pe_reset_working_set(scheduler);
free(scheduler->priv->local_node_name);
free(scheduler->priv);
free(scheduler);
}
}
#define XPATH_DEPRECATED_RULES \
"//" PCMK_XE_OP_DEFAULTS "//" PCMK_XE_EXPRESSION \
"|//" PCMK_XE_OP "//" PCMK_XE_EXPRESSION
/*!
* \internal
* \brief Log a warning for deprecated rule syntax in operations
*
* \param[in] scheduler Scheduler data
*/
static void
check_for_deprecated_rules(pcmk_scheduler_t *scheduler)
{
// @COMPAT Drop this function when support for the syntax is dropped
xmlNode *deprecated = get_xpath_object(XPATH_DEPRECATED_RULES,
scheduler->input, LOG_NEVER);
if (deprecated != NULL) {
pcmk__warn_once(pcmk__wo_op_attr_expr,
"Support for rules with node attribute expressions in "
PCMK_XE_OP " or " PCMK_XE_OP_DEFAULTS " is deprecated "
"and will be dropped in a future release");
}
}
/*
* Unpack everything
* At the end you'll have:
* - A list of nodes
* - A list of resources (each with any dependencies on other resources)
* - A list of constraints between resources and nodes
* - A list of constraints between start/stop actions
* - A list of nodes that need to be stonith'd
* - A list of nodes that need to be shutdown
* - A list of the possible stop/start actions (without dependencies)
*/
gboolean
cluster_status(pcmk_scheduler_t * scheduler)
{
const char *new_version = NULL;
xmlNode *section = NULL;
if ((scheduler == NULL) || (scheduler->input == NULL)) {
return FALSE;
}
new_version = crm_element_value(scheduler->input, PCMK_XA_CRM_FEATURE_SET);
if (pcmk__check_feature_set(new_version) != pcmk_rc_ok) {
pcmk__config_err("Can't process CIB with feature set '%s' greater than our own '%s'",
new_version, CRM_FEATURE_SET);
return FALSE;
}
crm_trace("Beginning unpack");
if (scheduler->priv->failed != NULL) {
pcmk__xml_free(scheduler->priv->failed);
}
scheduler->priv->failed = pcmk__xe_create(NULL, "failed-ops");
if (scheduler->priv->now == NULL) {
scheduler->priv->now = crm_time_new(NULL);
}
if (pcmk__xe_attr_is_true(scheduler->input, PCMK_XA_HAVE_QUORUM)) {
pcmk__set_scheduler_flags(scheduler, pcmk__sched_quorate);
} else {
pcmk__clear_scheduler_flags(scheduler, pcmk__sched_quorate);
}
scheduler->priv->op_defaults = get_xpath_object("//" PCMK_XE_OP_DEFAULTS,
scheduler->input,
LOG_NEVER);
check_for_deprecated_rules(scheduler);
scheduler->priv->rsc_defaults = get_xpath_object("//" PCMK_XE_RSC_DEFAULTS,
scheduler->input,
LOG_NEVER);
section = get_xpath_object("//" PCMK_XE_CRM_CONFIG, scheduler->input,
LOG_TRACE);
unpack_config(section, scheduler);
if (!pcmk_any_flags_set(scheduler->flags,
pcmk__sched_location_only|pcmk__sched_quorate)
&& (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
pcmk__sched_warn(scheduler,
"Fencing and resource management disabled "
"due to lack of quorum");
}
section = get_xpath_object("//" PCMK_XE_NODES, scheduler->input, LOG_TRACE);
unpack_nodes(section, scheduler);
section = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input,
LOG_TRACE);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
unpack_remote_nodes(section, scheduler);
}
unpack_resources(section, scheduler);
section = get_xpath_object("//" PCMK_XE_FENCING_TOPOLOGY, scheduler->input,
LOG_TRACE);
pcmk__validate_fencing_topology(section);
section = get_xpath_object("//" PCMK_XE_TAGS, scheduler->input, LOG_NEVER);
unpack_tags(section, scheduler);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
section = get_xpath_object("//" PCMK_XE_STATUS, scheduler->input,
LOG_TRACE);
unpack_status(section, scheduler);
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_no_counts)) {
for (GList *item = scheduler->priv->resources;
item != NULL; item = item->next) {
pcmk_resource_t *rsc = item->data;
rsc->priv->fns->count(item->data);
}
crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
scheduler->priv->ninstances,
scheduler->priv->disabled_resources,
scheduler->priv->blocked_resources);
}
if ((scheduler->priv->local_node_name != NULL)
&& (pcmk_find_node(scheduler,
scheduler->priv->local_node_name) == NULL)) {
crm_info("Creating a fake local node for %s",
scheduler->priv->local_node_name);
pe_create_node(scheduler->priv->local_node_name,
scheduler->priv->local_node_name, NULL, 0, scheduler);
}
pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_status);
return TRUE;
}
/*!
* \internal
* \brief Free a list of pcmk_resource_t
*
* \param[in,out] resources List to free
*
* \note When the scheduler's resource list is freed, that includes the original
* storage for the uname and id of any Pacemaker Remote nodes in the
* scheduler's node list, so take care not to use those afterward.
* \todo Refactor pcmk_node_t to strdup() the node name.
*/
static void
pe_free_resources(GList *resources)
{
pcmk_resource_t *rsc = NULL;
GList *iterator = resources;
while (iterator != NULL) {
rsc = (pcmk_resource_t *) iterator->data;
iterator = iterator->next;
rsc->priv->fns->free(rsc);
}
if (resources != NULL) {
g_list_free(resources);
}
}
static void
pe_free_actions(GList *actions)
{
GList *iterator = actions;
while (iterator != NULL) {
pe_free_action(iterator->data);
iterator = iterator->next;
}
if (actions != NULL) {
g_list_free(actions);
}
}
static void
pe_free_nodes(GList *nodes)
{
for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
pcmk_node_t *node = (pcmk_node_t *) iterator->data;
// Shouldn't be possible, but to be safe ...
if (node == NULL) {
continue;
}
if (node->details == NULL) {
free(node);
continue;
}
/* This is called after pe_free_resources(), which means that we can't
* use node->priv->name for Pacemaker Remote nodes.
*/
crm_trace("Freeing node %s", (pcmk__is_pacemaker_remote_node(node)?
"(guest or remote)" : pcmk__node_name(node)));
if (node->priv->attrs != NULL) {
g_hash_table_destroy(node->priv->attrs);
}
if (node->priv->utilization != NULL) {
g_hash_table_destroy(node->priv->utilization);
}
if (node->priv->digest_cache != NULL) {
g_hash_table_destroy(node->priv->digest_cache);
}
g_list_free(node->details->running_rsc);
g_list_free(node->priv->assigned_resources);
free(node->priv);
free(node->details);
free(node->assign);
free(node);
}
if (nodes != NULL) {
g_list_free(nodes);
}
}
static void
pe__free_ordering(GList *constraints)
{
GList *iterator = constraints;
while (iterator != NULL) {
pcmk__action_relation_t *order = iterator->data;
iterator = iterator->next;
free(order->task1);
free(order->task2);
free(order);
}
if (constraints != NULL) {
g_list_free(constraints);
}
}
static void
pe__free_location(GList *constraints)
{
GList *iterator = constraints;
while (iterator != NULL) {
pcmk__location_t *cons = iterator->data;
iterator = iterator->next;
g_list_free_full(cons->nodes, pcmk__free_node_copy);
free(cons->id);
free(cons);
}
if (constraints != NULL) {
g_list_free(constraints);
}
}
/*!
* \brief Reset scheduler data to defaults without freeing it or constraints
*
* \param[in,out] scheduler Scheduler data to reset
*
* \deprecated This function is deprecated as part of the API;
* pe_reset_working_set() should be used instead.
*/
void
cleanup_calculations(pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return;
}
pcmk__clear_scheduler_flags(scheduler, pcmk__sched_have_status);
if (scheduler->priv->options != NULL) {
g_hash_table_destroy(scheduler->priv->options);
}
if (scheduler->priv->singletons != NULL) {
g_hash_table_destroy(scheduler->priv->singletons);
}
if (scheduler->priv->ticket_constraints != NULL) {
g_hash_table_destroy(scheduler->priv->ticket_constraints);
}
if (scheduler->priv->templates != NULL) {
g_hash_table_destroy(scheduler->priv->templates);
}
if (scheduler->priv->tags != NULL) {
g_hash_table_destroy(scheduler->priv->tags);
}
crm_trace("deleting resources");
pe_free_resources(scheduler->priv->resources);
crm_trace("deleting actions");
pe_free_actions(scheduler->priv->actions);
crm_trace("deleting nodes");
pe_free_nodes(scheduler->nodes);
- pe__free_param_checks(scheduler);
+ pcmk__free_param_checks(scheduler);
g_list_free(scheduler->priv->stop_needed);
crm_time_free(scheduler->priv->now);
pcmk__xml_free(scheduler->input);
pcmk__xml_free(scheduler->priv->failed);
pcmk__xml_free(scheduler->priv->graph);
set_working_set_defaults(scheduler);
CRM_LOG_ASSERT((scheduler->priv->location_constraints == NULL)
&& (scheduler->priv->ordering_constraints == NULL));
}
/*!
* \brief Reset scheduler data to default state without freeing it
*
* \param[in,out] scheduler Scheduler data to reset
*/
void
pe_reset_working_set(pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return;
}
crm_trace("Deleting %d ordering constraints",
g_list_length(scheduler->priv->ordering_constraints));
pe__free_ordering(scheduler->priv->ordering_constraints);
scheduler->priv->ordering_constraints = NULL;
crm_trace("Deleting %d location constraints",
g_list_length(scheduler->priv->location_constraints));
pe__free_location(scheduler->priv->location_constraints);
scheduler->priv->location_constraints = NULL;
crm_trace("Deleting %d colocation constraints",
g_list_length(scheduler->priv->colocation_constraints));
g_list_free_full(scheduler->priv->colocation_constraints, free);
scheduler->priv->colocation_constraints = NULL;
cleanup_calculations(scheduler);
}
void
set_working_set_defaults(pcmk_scheduler_t *scheduler)
{
// These members must be preserved
pcmk__scheduler_private_t *priv = scheduler->priv;
pcmk__output_t *out = priv->out;
char *local_node_name = scheduler->priv->local_node_name;
// Wipe the main structs (any other members must have previously been freed)
memset(scheduler, 0, sizeof(pcmk_scheduler_t));
memset(priv, 0, sizeof(pcmk__scheduler_private_t));
// Restore the members to preserve
scheduler->priv = priv;
scheduler->priv->out = out;
scheduler->priv->local_node_name = local_node_name;
// Set defaults for everything else
pcmk__set_scheduler_defaults(scheduler);
}
pcmk_resource_t *
pe_find_resource(GList *rsc_list, const char *id)
{
return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
}
pcmk_resource_t *
pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
{
GList *rIter = NULL;
for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
pcmk_resource_t *parent = rIter->data;
pcmk_resource_t *match = parent->priv->fns->find_rsc(parent, id, NULL,
flags);
if (match != NULL) {
return match;
}
}
crm_trace("No match for %s", id);
return NULL;
}
/*!
* \brief Find a node by name or ID in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id If not NULL, ID of node to find
* \param[in] uname If not NULL, name of node to find
*
* \return Node from \p nodes that matches \p id if any,
* otherwise node from \p nodes that matches \p uname if any,
* otherwise NULL
*/
pcmk_node_t *
pe_find_node_any(const GList *nodes, const char *id, const char *uname)
{
pcmk_node_t *match = NULL;
if (id != NULL) {
match = pe_find_node_id(nodes, id);
}
if ((match == NULL) && (uname != NULL)) {
match = pcmk__find_node_in_list(nodes, uname);
}
return match;
}
/*!
* \brief Find a node by ID in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id ID of node to find
*
* \return Node from \p nodes that matches \p id if any, otherwise NULL
*/
pcmk_node_t *
pe_find_node_id(const GList *nodes, const char *id)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* @TODO Whether node IDs should be considered case-sensitive should
* probably depend on the node type, so functionizing the comparison
* would be worthwhile
*/
if (pcmk__str_eq(node->priv->id, id, pcmk__str_casei)) {
return node;
}
}
return NULL;
}
// Deprecated functions kept only for backward API compatibility
// LCOV_EXCL_START
#include <crm/pengine/status_compat.h>
/*!
* \brief Find a node by name in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] node_name Name of node to find
*
* \return Node from \p nodes that matches \p node_name if any, otherwise NULL
*/
pcmk_node_t *
pe_find_node(const GList *nodes, const char *node_name)
{
return pcmk__find_node_in_list(nodes, node_name);
}
// LCOV_EXCL_STOP
// End deprecated API
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 02404273ad..be9010cc62 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,5099 +1,5099 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/services.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
CRM_TRACE_INIT_DATA(pe_status);
// A (parsed) resource action history entry
struct action_history {
pcmk_resource_t *rsc; // Resource that history is for
pcmk_node_t *node; // Node that history is for
xmlNode *xml; // History entry XML
// Parsed from entry XML
const char *id; // XML ID of history entry
const char *key; // Operation key of action
const char *task; // Action name
const char *exit_reason; // Exit reason given for result
guint interval_ms; // Action interval
int call_id; // Call ID of action
int expected_exit_status; // Expected exit status of action
int exit_status; // Actual exit status of action
int execution_status; // Execution status of action
};
/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
* use pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the
* flag is stringified more readably in log messages.
*/
#define set_config_flag(scheduler, option, flag) do { \
GHashTable *config_hash = (scheduler)->priv->options; \
const char *scf_value = pcmk__cluster_option(config_hash, (option)); \
\
if (scf_value != NULL) { \
if (crm_is_true(scf_value)) { \
(scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} else { \
(scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} \
} \
} while(0)
static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
xmlNode *xml_op, xmlNode **last_failure,
enum pcmk__on_fail *failed);
static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node);
static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
bool overwrite, pcmk_scheduler_t *scheduler);
static void determine_online_status(const xmlNode *node_state,
pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler);
static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler);
/*!
* \internal
* \brief Check whether a node is a dangling guest node
*
* \param[in] node Node to check
*
* \return true if \p node had a Pacemaker Remote connection resource with a
* launcher that was removed from the CIB, otherwise false.
*/
static bool
is_dangling_guest_node(pcmk_node_t *node)
{
return pcmk__is_pacemaker_remote_node(node)
&& (node->priv->remote != NULL)
&& (node->priv->remote->priv->launcher == NULL)
&& pcmk_is_set(node->priv->remote->flags,
pcmk__rsc_removed_launched);
}
/*!
* \brief Schedule a fence action for a node
*
* \param[in,out] scheduler Scheduler data
* \param[in,out] node Node to fence
* \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider
* \c PCMK_OPT_PRIORITY_FENCING_DELAY
*/
void
pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
if (pcmk__is_guest_or_bundle_node(node)) {
// Fence a guest or bundle node by marking its launcher as failed
pcmk_resource_t *rsc = node->priv->remote->priv->launcher;
if (!pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
pcmk__node_name(node), reason, rsc->id);
} else {
pcmk__sched_warn(scheduler,
"Guest node %s will be fenced "
"(by recovering its guest resource %s): %s",
pcmk__node_name(node), rsc->id, reason);
/* We don't mark the node as unclean because that would prevent the
* node from running resources. We want to allow it to run resources
* in this transition if the recovery succeeds.
*/
pcmk__set_node_flags(node, pcmk__node_remote_reset);
pcmk__set_rsc_flags(rsc,
pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
}
}
} else if (is_dangling_guest_node(node)) {
crm_info("Cleaning up dangling connection for guest node %s: "
"fencing was already done because %s, "
"and guest resource no longer exists",
pcmk__node_name(node), reason);
pcmk__set_rsc_flags(node->priv->remote,
pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
} else if (pcmk__is_remote_node(node)) {
pcmk_resource_t *rsc = node->priv->remote;
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pcmk__node_name(node), reason);
} else if (!pcmk_is_set(node->priv->flags, pcmk__node_remote_reset)) {
pcmk__set_node_flags(node, pcmk__node_remote_reset);
pcmk__sched_warn(scheduler, "Remote node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes
pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
reason);
} else {
pcmk__sched_warn(scheduler, "Cluster node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
}
}
// @TODO xpaths can't handle templates, rules, or id-refs
// nvpair with provides or requires set to unfencing
#define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR \
"[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'" \
"or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') " \
"and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']"
// unfencing in rsc_defaults or any resource
#define XPATH_ENABLE_UNFENCING \
"/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \
"//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR \
"|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \
"/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR
static void
set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
{
xmlXPathObjectPtr result = NULL;
if (!pcmk_is_set(scheduler->flags, flag)) {
result = xpath_search(scheduler->input, xpath);
if (result && (numXpathResults(result) > 0)) {
pcmk__set_scheduler_flags(scheduler, flag);
}
freeXpathObject(result);
}
}
gboolean
unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
GHashTable *config_hash = pcmk__strkey_table(free, free);
const pcmk_rule_input_t rule_input = {
.now = scheduler->priv->now,
};
scheduler->priv->options = config_hash;
pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET,
&rule_input, config_hash,
PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, scheduler);
pcmk__validate_cluster_options(config_hash);
set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES,
pcmk__sched_probe_resources);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_probe_resources)) {
crm_info("Startup probes: disabled (dangerous)");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT
" is nonzero");
pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_fencing);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
set_if_xpath(pcmk__sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
scheduler);
value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
pcmk_parse_interval_spec(value, &(scheduler->priv->fence_timeout_ms));
crm_debug("Default fencing action timeout: %s",
pcmk__readable_interval(scheduler->priv->fence_timeout_ms));
set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED,
pcmk__sched_fencing_enabled);
if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
crm_debug("STONITH of failed nodes is enabled");
} else {
crm_debug("STONITH of failed nodes is disabled");
}
scheduler->priv->fence_action =
pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_ACTION);
crm_trace("STONITH will %s nodes", scheduler->priv->fence_action);
set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING,
pcmk__sched_concurrent_fencing);
if (pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)) {
crm_debug("Concurrent fencing is enabled");
} else {
crm_debug("Concurrent fencing is disabled");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY);
if (value) {
pcmk_parse_interval_spec(value,
&(scheduler->priv->priority_fencing_ms));
crm_trace("Priority fencing delay is %s",
pcmk__readable_interval(scheduler->priv->priority_fencing_ms));
}
set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES,
pcmk__sched_stop_all);
crm_debug("Stop all active resources: %s",
pcmk__flag_text(scheduler->flags, pcmk__sched_stop_all));
set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER,
pcmk__sched_symmetric_cluster);
if (pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY);
if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
} else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
} else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_demote;
} else if (pcmk__strcase_any_of(value, PCMK_VALUE_FENCE,
PCMK_VALUE_FENCE_LEGACY, NULL)) {
if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
int do_panic = 0;
crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC,
&do_panic);
if (do_panic
|| pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) {
scheduler->no_quorum_policy = pcmk_no_quorum_fence;
} else {
crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop': cluster has never had quorum");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop' because fencing is disabled");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
switch (scheduler->no_quorum_policy) {
case pcmk_no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
case pcmk_no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
case pcmk_no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
case pcmk_no_quorum_fence:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
case pcmk_no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES,
pcmk__sched_stop_removed_resources);
if (pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) {
crm_trace("Orphan resources are stopped");
} else {
crm_trace("Orphan resources are ignored");
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS,
pcmk__sched_cancel_removed_actions);
if (pcmk_is_set(scheduler->flags, pcmk__sched_cancel_removed_actions)) {
crm_trace("Orphan resource actions are stopped");
} else {
crm_trace("Orphan resource actions are ignored");
}
set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE,
pcmk__sched_in_maintenance);
crm_trace("Maintenance mode: %s",
pcmk__flag_text(scheduler->flags, pcmk__sched_in_maintenance));
set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL,
pcmk__sched_start_failure_fatal);
if (pcmk_is_set(scheduler->flags, pcmk__sched_start_failure_fatal)) {
crm_trace("Start failures are always fatal");
} else {
crm_trace("Start failures are handled by failcount");
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING,
pcmk__sched_startup_fencing);
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
pcmk__warn_once(pcmk__wo_blind,
"Blind faith: not fencing unseen nodes");
}
pe__unpack_node_health_scores(scheduler);
scheduler->priv->placement_strategy =
pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY);
crm_trace("Placement strategy: %s", scheduler->priv->placement_strategy);
set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK,
pcmk__sched_shutdown_lock);
if (pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT);
pcmk_parse_interval_spec(value, &(scheduler->priv->shutdown_lock_ms));
crm_trace("Resources will be locked to nodes that were cleanly "
"shut down (locks expire after %s)",
pcmk__readable_interval(scheduler->priv->shutdown_lock_ms));
} else {
crm_trace("Resources will not be locked to nodes that were cleanly "
"shut down");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT);
pcmk_parse_interval_spec(value, &(scheduler->priv->node_pending_ms));
if (scheduler->priv->node_pending_ms == 0U) {
crm_trace("Do not fence pending nodes");
} else {
crm_trace("Fence pending nodes after %s",
pcmk__readable_interval(scheduler->priv->node_pending_ms));
}
return TRUE;
}
/*!
* \internal
* \brief Create a new node object in scheduler data
*
* \param[in] id ID of new node
* \param[in] uname Name of new node
* \param[in] type Type of new node
* \param[in] score Score of new node
* \param[in,out] scheduler Scheduler data
*
* \return Newly created node object
* \note The returned object is part of the scheduler data and should not be
* freed separately.
*/
pcmk_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
int score, pcmk_scheduler_t *scheduler)
{
enum pcmk__node_variant variant = pcmk__node_variant_cluster;
pcmk_node_t *new_node = NULL;
if (pcmk_find_node(scheduler, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
if (pcmk__str_eq(type, PCMK_VALUE_MEMBER,
pcmk__str_null_matches|pcmk__str_casei)) {
variant = pcmk__node_variant_cluster;
} else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) {
variant = pcmk__node_variant_remote;
} else {
pcmk__config_err("Ignoring node %s with unrecognized type '%s'",
pcmk__s(uname, "without name"), type);
return NULL;
}
new_node = calloc(1, sizeof(pcmk_node_t));
if (new_node == NULL) {
pcmk__sched_err(scheduler, "Could not allocate memory for node %s",
uname);
return NULL;
}
new_node->assign = calloc(1, sizeof(struct pcmk__node_assignment));
new_node->details = calloc(1, sizeof(struct pcmk__node_details));
new_node->priv = calloc(1, sizeof(pcmk__node_private_t));
if ((new_node->assign == NULL) || (new_node->details == NULL)
|| (new_node->priv == NULL)) {
free(new_node->assign);
free(new_node->details);
free(new_node->priv);
free(new_node);
pcmk__sched_err(scheduler, "Could not allocate memory for node %s",
uname);
return NULL;
}
crm_trace("Creating node for entry %s/%s", uname, id);
new_node->assign->score = score;
new_node->priv->id = id;
new_node->priv->name = uname;
new_node->priv->flags = pcmk__node_probes_allowed;
new_node->details->online = FALSE;
new_node->details->shutdown = FALSE;
new_node->details->running_rsc = NULL;
new_node->priv->scheduler = scheduler;
new_node->priv->variant = variant;
new_node->priv->attrs = pcmk__strkey_table(free, free);
new_node->priv->utilization = pcmk__strkey_table(free, free);
new_node->priv->digest_cache = pcmk__strkey_table(free, pe__free_digests);
if (pcmk__is_pacemaker_remote_node(new_node)) {
pcmk__insert_dup(new_node->priv->attrs, CRM_ATTR_KIND, "remote");
pcmk__set_scheduler_flags(scheduler, pcmk__sched_have_remote_nodes);
} else {
pcmk__insert_dup(new_node->priv->attrs, CRM_ATTR_KIND, "cluster");
}
scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
pe__cmp_node_name);
return new_node;
}
static const char *
expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
const char *container_id = pcmk__xe_id(xml_obj);
const char *remote_name = NULL;
const char *remote_server = NULL;
const char *remote_port = NULL;
const char *connect_timeout = "60s";
const char *remote_allow_migrate = NULL;
const char *is_managed = NULL;
for (attr_set = pcmk__xe_first_child(xml_obj, PCMK_XE_META_ATTRIBUTES,
NULL, NULL);
attr_set != NULL;
attr_set = pcmk__xe_next(attr_set, PCMK_XE_META_ATTRIBUTES)) {
for (attr = pcmk__xe_first_child(attr_set, NULL, NULL, NULL);
attr != NULL; attr = pcmk__xe_next(attr, NULL)) {
const char *value = crm_element_value(attr, PCMK_XA_VALUE);
const char *name = crm_element_value(attr, PCMK_XA_NAME);
if (name == NULL) { // Sanity
continue;
}
if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) {
remote_name = value;
} else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) {
remote_server = value;
} else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) {
remote_port = value;
} else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) {
connect_timeout = value;
} else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) {
remote_allow_migrate = value;
} else if (strcmp(name, PCMK_META_IS_MANAGED) == 0) {
is_managed = value;
}
}
}
if (remote_name == NULL) {
return NULL;
}
if (pe_find_resource(data->priv->resources, remote_name) != NULL) {
return NULL;
}
pe_create_remote_xml(parent, remote_name, container_id,
remote_allow_migrate, is_managed,
connect_timeout, remote_server, remote_port);
return remote_name;
}
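/* For reference, this reacts to guest node meta-attributes on an ordinary
 * primitive, as in this illustrative configuration:
 *
 *     <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
 *       <meta_attributes id="vm1-meta_attributes">
 *         <nvpair id="vm1-remote-node" name="remote-node" value="guest1"/>
 *         <nvpair id="vm1-remote-addr" name="remote-addr" value="192.168.122.10"/>
 *       </meta_attributes>
 *     </primitive>
 *
 * which causes an ocf:pacemaker:remote primitive named "guest1" to be
 * generated via pe_create_remote_xml().
 */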
static void
handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
{
if ((new_node->priv->variant == pcmk__node_variant_remote)
&& (new_node->priv->remote == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
*/
return;
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
} else {
// Blind faith ...
new_node->details->unclean = FALSE;
}
}
gboolean
unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
pcmk_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
for (xml_obj = pcmk__xe_first_child(xml_nodes, PCMK_XE_NODE, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, PCMK_XE_NODE)) {
int score = 0;
int rc = pcmk__xe_get_score(xml_obj, PCMK_XA_SCORE, &score, 0);
new_node = NULL;
id = crm_element_value(xml_obj, PCMK_XA_ID);
uname = crm_element_value(xml_obj, PCMK_XA_UNAME);
type = crm_element_value(xml_obj, PCMK_XA_TYPE);
crm_trace("Processing node %s/%s", uname, id);
if (id == NULL) {
pcmk__config_err("Ignoring <" PCMK_XE_NODE
"> entry in configuration without id");
continue;
}
if (rc != pcmk_rc_ok) {
// Not possible with schema validation enabled
pcmk__config_warn("Using 0 as score for node %s "
"because '%s' is not a valid score: %s",
pcmk__s(uname, "without name"),
crm_element_value(xml_obj, PCMK_XA_SCORE),
pcmk_rc_str(rc));
}
new_node = pe_create_node(id, uname, type, score, scheduler);
if (new_node == NULL) {
return FALSE;
}
handle_startup_fencing(scheduler, new_node);
add_node_attrs(xml_obj, new_node, FALSE, scheduler);
crm_trace("Done with node %s",
crm_element_value(xml_obj, PCMK_XA_UNAME));
}
return TRUE;
}
static void
unpack_launcher(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *launcher_id = NULL;
if (rsc->priv->children != NULL) {
g_list_foreach(rsc->priv->children, (GFunc) unpack_launcher,
scheduler);
return;
}
launcher_id = g_hash_table_lookup(rsc->priv->meta, PCMK__META_CONTAINER);
if ((launcher_id != NULL)
&& !pcmk__str_eq(launcher_id, rsc->id, pcmk__str_none)) {
pcmk_resource_t *launcher = pe_find_resource(scheduler->priv->resources,
launcher_id);
if (launcher != NULL) {
rsc->priv->launcher = launcher;
launcher->priv->launched =
g_list_append(launcher->priv->launched, rsc);
pcmk__rsc_trace(rsc, "Resource %s's launcher is %s",
rsc->id, launcher_id);
} else {
pcmk__config_err("Resource %s: Unknown " PCMK__META_CONTAINER " %s",
rsc->id, launcher_id);
}
}
}
gboolean
unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
/* Create remote nodes and guest nodes from the resource configuration
* before unpacking resources.
*/
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) {
const char *new_node_id = NULL;
/* Check for remote nodes, which are defined by ocf:pacemaker:remote
* primitives.
*/
if (xml_contains_remote_node(xml_obj)) {
new_node_id = pcmk__xe_id(xml_obj);
/* The pcmk_find_node() check ensures we don't iterate over an
* expanded node that has already been added to the node list
*/
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
0, scheduler);
}
continue;
}
/* Check for guest nodes, which are defined by special meta-attributes
* of a primitive of any type (for example, VirtualDomain or Xen).
*/
if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) {
/* This will add an ocf:pacemaker:remote primitive to the
* configuration for the guest node's connection, to be unpacked
* later.
*/
new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
0, scheduler);
}
continue;
}
/* Check for guest nodes inside a group. Clones are currently not
* supported as guest nodes.
*/
if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) {
xmlNode *xml_obj2 = NULL;
for (xml_obj2 = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2, NULL)) {
new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, pcmk__xe_id(xml_obj2),
pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
0, scheduler);
}
}
}
}
return TRUE;
}
/* Call this after all the nodes and resources have been
* unpacked, but before the status section is read.
*
* A remote node's online status is reflected by the state
* of the remote node's connection resource. We need to link
* the remote node to this connection resource so we can have
* easy access to the connection resource during the scheduler calculations.
*/
static void
link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
{
pcmk_node_t *remote_node = NULL;
if (!pcmk_is_set(new_rsc->flags, pcmk__rsc_is_remote_connection)) {
return;
}
if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
/* Remote nodes and their connection resources are not linked during
* quick location-only calculations.
*/
return;
}
remote_node = pcmk_find_node(scheduler, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
new_rsc->id, pcmk__node_name(remote_node));
remote_node->priv->remote = new_rsc;
if (new_rsc->priv->launcher == NULL) {
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
handle_startup_fencing(scheduler, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
* that we know the node is a guest node, update it correctly.
*/
pcmk__insert_dup(remote_node->priv->attrs,
CRM_ATTR_KIND, "container");
}
}
/*!
* \internal
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
* \param[in,out] scheduler Scheduler data
*
* \return TRUE
*
* \note unpack_remote_nodes() MUST be called before this, so that the nodes can
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
scheduler->priv->templates = pcmk__strkey_table(free, pcmk__free_idref);
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) {
pcmk_resource_t *new_rsc = NULL;
const char *id = pcmk__xe_id(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
xml_obj->name);
continue;
}
if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) {
if (g_hash_table_lookup_extended(scheduler->priv->templates, id,
NULL, NULL) == FALSE) {
/* Record the template's ID so we know it exists, even if no resource
* references it yet.
*/
pcmk__insert_dup(scheduler->priv->templates, id, NULL);
}
continue;
}
crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
scheduler) == pcmk_rc_ok) {
scheduler->priv->resources =
g_list_append(scheduler->priv->resources, new_rsc);
pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
xml_obj->name, id);
}
}
for (gIter = scheduler->priv->resources;
gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
unpack_launcher(rsc, scheduler);
link_rsc2remotenode(scheduler, rsc);
}
scheduler->priv->resources = g_list_sort(scheduler->priv->resources,
pe__cmp_rsc_priority);
if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
/* Ignore */
} else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)
&& !pcmk_is_set(scheduler->flags, pcmk__sched_have_fencing)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the "
PCMK_OPT_STONITH_ENABLED " option");
pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
}
return TRUE;
}
/*!
* \internal
* \brief Validate the levels in a fencing topology
*
* \param[in] xml \c PCMK_XE_FENCING_TOPOLOGY element
*/
void
pcmk__validate_fencing_topology(const xmlNode *xml)
{
if (xml == NULL) {
return;
}
CRM_CHECK(pcmk__xe_is(xml, PCMK_XE_FENCING_TOPOLOGY), return);
for (const xmlNode *level = pcmk__xe_first_child(xml, PCMK_XE_FENCING_LEVEL,
NULL, NULL);
level != NULL; level = pcmk__xe_next(level, PCMK_XE_FENCING_LEVEL)) {
const char *id = pcmk__xe_id(level);
int index = 0;
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring fencing level without ID");
continue;
}
if (crm_element_value_int(level, PCMK_XA_INDEX, &index) != 0) {
pcmk__config_err("Ignoring fencing level %s with invalid index",
id);
continue;
}
if ((index < ST__LEVEL_MIN) || (index > ST__LEVEL_MAX)) {
pcmk__config_err("Ignoring fencing level %s with out-of-range "
"index %d",
id, index);
}
}
}
gboolean
unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_tag = NULL;
scheduler->priv->tags = pcmk__strkey_table(free, pcmk__free_idref);
for (xml_tag = pcmk__xe_first_child(xml_tags, PCMK_XE_TAG, NULL, NULL);
xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag, PCMK_XE_TAG)) {
xmlNode *xml_obj_ref = NULL;
const char *tag_id = pcmk__xe_id(xml_tag);
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID,
(const char *) xml_tag->name);
continue;
}
for (xml_obj_ref = pcmk__xe_first_child(xml_tag, PCMK_XE_OBJ_REF,
NULL, NULL);
xml_obj_ref != NULL;
xml_obj_ref = pcmk__xe_next(xml_obj_ref, PCMK_XE_OBJ_REF)) {
const char *obj_ref = pcmk__xe_id(xml_obj_ref);
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID,
xml_obj_ref->name, tag_id);
continue;
}
pcmk__add_idref(scheduler->priv->tags, tag_id, obj_ref);
}
}
return TRUE;
}
/*!
* \internal
* \brief Unpack a ticket state entry
*
* \param[in] xml_ticket XML ticket state to unpack
* \param[in,out] userdata Scheduler data
*
* \return pcmk_rc_ok (to always continue unpacking further entries)
*/
static int
unpack_ticket_state(xmlNode *xml_ticket, void *userdata)
{
pcmk_scheduler_t *scheduler = userdata;
const char *ticket_id = NULL;
const char *granted = NULL;
const char *last_granted = NULL;
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
pcmk__ticket_t *ticket = NULL;
ticket_id = pcmk__xe_id(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
return pcmk_rc_ok;
}
crm_trace("Processing ticket state for %s", ticket_id);
ticket = g_hash_table_lookup(scheduler->priv->ticket_constraints,
ticket_id);
if (ticket == NULL) {
ticket = ticket_new(ticket_id, scheduler);
if (ticket == NULL) {
return pcmk_rc_ok;
}
}
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = pcmk__xml_attr_value(xIter);
if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) {
continue;
}
pcmk__insert_dup(ticket->state, prop_name, prop_value);
}
granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED);
if (granted && crm_is_true(granted)) {
pcmk__set_ticket_flags(ticket, pcmk__ticket_granted);
crm_info("We have ticket '%s'", ticket->id);
} else {
pcmk__clear_ticket_flags(ticket, pcmk__ticket_granted);
crm_info("We do not have ticket '%s'", ticket->id);
}
last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED);
if (last_granted) {
long long last_granted_ll = 0LL;
int rc = pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
if (rc != pcmk_rc_ok) {
crm_warn("Using %lld instead of invalid " PCMK_XA_LAST_GRANTED
" value '%s' in state for ticket %s: %s",
last_granted_ll, last_granted, ticket->id,
pcmk_rc_str(rc));
}
ticket->last_granted = (time_t) last_granted_ll;
}
standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY);
if (standby && crm_is_true(standby)) {
pcmk__set_ticket_flags(ticket, pcmk__ticket_standby);
if (pcmk_is_set(ticket->flags, pcmk__ticket_granted)) {
crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
}
} else {
pcmk__clear_ticket_flags(ticket, pcmk__ticket_standby);
}
crm_trace("Done with ticket state for %s", ticket_id);
return pcmk_rc_ok;
}
static void
unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = NULL;
pcmk_resource_t *rsc = NULL;
int maint = 0;
if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
return;
}
if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) {
return;
}
crm_trace("Processing Pacemaker Remote node %s",
pcmk__node_name(this_node));
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE),
&maint, 0);
if (maint) {
pcmk__set_node_flags(this_node, pcmk__node_remote_maint);
} else {
pcmk__clear_node_flags(this_node, pcmk__node_remote_maint);
}
rsc = this_node->priv->remote;
if (!pcmk_is_set(this_node->priv->flags, pcmk__node_remote_reset)) {
this_node->details->unclean = FALSE;
pcmk__set_node_flags(this_node, pcmk__node_seen);
}
attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL,
NULL);
add_node_attrs(attrs, this_node, TRUE, scheduler);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pcmk__node_name(this_node));
this_node->details->shutdown = TRUE;
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(this_node));
pcmk__set_node_flags(this_node, pcmk__node_standby);
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))
|| ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed))) {
crm_info("%s is in maintenance mode", pcmk__node_name(this_node));
this_node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(this_node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__warn_once(pcmk__wo_rdisc_enabled,
"Support for the "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" node attribute is deprecated and will be removed"
" (and behave as 'true') in a future release.");
if (pcmk__is_remote_node(this_node)
&& !pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
pcmk__node_name(this_node));
} else {
/* This is either a remote node with fencing enabled, or a guest
* node. We don't care whether fencing is enabled when fencing guest
* nodes, because they are "fenced" by recovering their containing
* resource.
*/
crm_info("%s has resource discovery disabled",
pcmk__node_name(this_node));
pcmk__clear_node_flags(this_node, pcmk__node_probes_allowed);
}
}
}
/*!
* \internal
* \brief Unpack a cluster node's transient attributes
*
* \param[in] state CIB node state XML
* \param[in,out] node Cluster node whose attributes are being unpacked
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = pcmk__xe_first_child(state,
PCMK__XE_TRANSIENT_ATTRIBUTES,
NULL, NULL);
add_node_attrs(attrs, node, TRUE, scheduler);
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(node));
pcmk__set_node_flags(node, pcmk__node_standby);
}
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in maintenance mode", pcmk__node_name(node));
node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute for %s because disabling resource"
" discovery is not allowed for cluster nodes",
pcmk__node_name(node));
}
}
/*!
* \internal
* \brief Unpack a node state entry (first pass)
*
* Unpack one node state entry from status. This unpacks information from the
* \c PCMK__XE_NODE_STATE element itself and node attributes inside it, but not
* the resource history inside it. Multiple passes through the status are needed
* to fully unpack everything.
*
* \param[in] state CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *uname = NULL;
pcmk_node_t *this_node = NULL;
id = crm_element_value(state, PCMK_XA_ID);
if (id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without "
PCMK_XA_ID);
crm_log_xml_info(state, "missing-id");
return;
}
uname = crm_element_value(state, PCMK_XA_UNAME);
if (uname == NULL) {
/* If a joining peer caused the cluster to acquire quorum from Corosync but
* has not yet joined the controller CPG membership, it's possible that the
* created PCMK__XE_NODE_STATE entry doesn't have a PCMK_XA_UNAME yet.
* Recognize the node as pending and wait for it to join the CPG.
*/
crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" "
"without " PCMK_XA_UNAME,
id);
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
crm_notice("Ignoring recorded state for removed node with name %s and "
PCMK_XA_ID " %s", pcmk__s(uname, "unknown"), id);
return;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
int remote_fenced = 0;
/* We can't determine the online status of Pacemaker Remote nodes until
* after all resource history has been unpacked. In this first pass, we
* do need to mark whether the node has been fenced, as this plays a
* role during unpacking cluster node resource state.
*/
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED),
&remote_fenced, 0);
if (remote_fenced) {
pcmk__set_node_flags(this_node, pcmk__node_remote_fenced);
} else {
pcmk__clear_node_flags(this_node, pcmk__node_remote_fenced);
}
return;
}
unpack_transient_attributes(state, this_node, scheduler);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
*/
this_node->details->unclean = FALSE;
pcmk__set_node_flags(this_node, pcmk__node_seen);
crm_trace("Determining online status of cluster node %s (id %s)",
pcmk__node_name(this_node), id);
determine_online_status(state, this_node, scheduler);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_quorate)
&& this_node->details->online
&& (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
pe_fence_node(scheduler, this_node, "cluster does not have quorum",
FALSE);
}
}
/*!
* \internal
* \brief Unpack nodes' resource history as much as possible
*
* Unpack as many nodes' resource history as possible in one pass through the
* status. We need to process Pacemaker Remote nodes' connections/containers
* before unpacking their history; the connection/container history will be
* in another node's history, so it might take multiple passes to unpack
* everything.
*
* \param[in] status CIB XML status section
* \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
* \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(const xmlNode *status, bool fence,
pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
// Loop through all PCMK__XE_NODE_STATE entries in CIB status
for (const xmlNode *state = pcmk__xe_first_child(status,
PCMK__XE_NODE_STATE, NULL,
NULL);
state != NULL; state = pcmk__xe_next(state, PCMK__XE_NODE_STATE)) {
const char *id = pcmk__xe_id(state);
const char *uname = crm_element_value(state, PCMK_XA_UNAME);
pcmk_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history from malformed "
PCMK__XE_NODE_STATE " without id and/or uname");
continue;
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
"no longer in configuration", id);
continue;
}
if (pcmk_is_set(this_node->priv->flags, pcmk__node_unpacked)) {
crm_trace("Not unpacking resource history for node %s because "
"already unpacked", id);
continue;
}
if (fence) {
// We're processing all remaining nodes
} else if (pcmk__is_guest_or_bundle_node(this_node)) {
/* We can unpack a guest node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
const pcmk_resource_t *remote = this_node->priv->remote;
const pcmk_resource_t *launcher = remote->priv->launcher;
if ((remote->priv->orig_role != pcmk_role_started)
|| (launcher->priv->orig_role != pcmk_role_started)) {
crm_trace("Not unpacking resource history for guest node %s "
"because launcher and connection are not known to "
"be up", id);
continue;
}
} else if (pcmk__is_remote_node(this_node)) {
/* We can unpack a remote node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection is up, except when shutdown locks are in use.
*/
pcmk_resource_t *rsc = this_node->priv->remote;
if ((rsc == NULL)
|| (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)
&& (rsc->priv->orig_role != pcmk_role_started))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
}
/* If fencing and shutdown locks are disabled and we're not processing
* unseen nodes, then we don't want to unpack offline nodes until online
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
} else if (!pcmk_any_flags_set(scheduler->flags,
pcmk__sched_fencing_enabled
|pcmk__sched_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
continue;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
unpack_handle_remote_attrs(this_node, state, scheduler);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
pcmk__set_node_flags(this_node, pcmk__node_unpacked);
unpack_node_lrm(this_node, state, scheduler);
rc = EAGAIN; // Other node histories might depend on this one
}
return rc;
}
/* Remove nodes that are down or stopping, and create positive rsc_to_node
* constraints between resources and the nodes they are running on
*/
gboolean
unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
if (scheduler->priv->ticket_constraints == NULL) {
scheduler->priv->ticket_constraints =
pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL;
state = pcmk__xe_next(state, NULL)) {
if (pcmk__xe_is(state, PCMK_XE_TICKETS)) {
pcmk__xe_foreach_child(state, PCMK__XE_TICKET_STATE,
unpack_ticket_state, scheduler);
} else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
unpack_node_state(state, scheduler);
}
}
while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
pcmk_is_set(scheduler->flags,
pcmk__sched_fencing_enabled),
scheduler);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
if (scheduler->priv->stop_needed != NULL) {
for (GList *item = scheduler->priv->stop_needed;
item != NULL; item = item->next) {
pcmk_resource_t *container = item->data;
pcmk_node_t *node = pcmk__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
g_list_free(scheduler->priv->stop_needed);
scheduler->priv->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *this_node = gIter->data;
if (!pcmk__is_pacemaker_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->priv->remote != NULL)) {
pe__set_next_role(this_node->priv->remote, pcmk_role_stopped,
"remote shutdown");
}
if (!pcmk_is_set(this_node->priv->flags, pcmk__node_unpacked)) {
determine_remote_online_status(scheduler, this_node);
}
}
return TRUE;
}
/*!
* \internal
* \brief Unpack node's time when it became a member at the cluster layer
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
* \param[in,out] scheduler Scheduler data
*
* \return Epoch time when node became a cluster member
* (or scheduler effective time for legacy entries) if a member,
* 0 if not a member, or -1 if no valid information available
*/
static long long
unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
{
const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
int member = 0;
if (member_time == NULL) {
return -1LL;
} else if (crm_str_to_boolean(member_time, &member) == 1) {
/* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
* recorded as a boolean for a DC < 2.1.7, or the node is pending
* shutdown and has left the CPG, in which case it was set to 1 to avoid
* fencing for PCMK_OPT_NODE_PENDING_TIMEOUT.
*
* We return the effective time for in_ccm=1 because what's important to
* avoid fencing is that effective time minus this value is less than
* the pending node timeout.
*/
return member? (long long) pcmk__scheduler_epoch_time(scheduler) : 0LL;
} else {
long long when_member = 0LL;
if ((pcmk__scan_ll(member_time, &when_member,
0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
" in " PCMK__XE_NODE_STATE " entry", member_time);
return -1LL;
}
return when_member;
}
}
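/* Illustrative sketch (hypothetical values, not upstream code): how
 * unpack_node_member() interprets PCMK__XA_IN_CCM values, assuming the
 * scheduler's effective time is 1700000100:
 *
 *     (missing)      -> -1          (no valid information available)
 *     "false" or "0" -> 0           (not a cluster member)
 *     "true" or "1"  -> 1700000100  (effective time, so the pending node
 *                                    timeout is not triggered)
 *     "1700000000"   -> 1700000000  (epoch time of membership)
 *     "junk" or "-5" -> -1          (warned and treated as unknown)
 */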
/*!
* \internal
* \brief Unpack node's time when it became online in process group
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
*
* \return Epoch time when node became online in process group (or 0 if not
* online, or 1 for legacy online entries)
*/
static long long
unpack_node_online(const xmlNode *node_state)
{
const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD);
// @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE,
pcmk__str_casei|pcmk__str_null_matches)) {
return 0LL;
} else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
return 1LL;
} else {
long long when_online = 0LL;
if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
|| (when_online < 0)) {
crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in "
PCMK__XE_NODE_STATE " entry, assuming offline", peer_time);
return 0LL;
}
return when_online;
}
}
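/* Illustrative sketch (hypothetical values, not upstream code): how
 * unpack_node_online() maps PCMK_XA_CRMD values:
 *
 *     (missing) or "offline" -> 0           (offline; legacy entry)
 *     "online"               -> 1           (legacy online entry)
 *     "1700000000"           -> 1700000000  (epoch time node joined CPG)
 *     "junk" or "-5"         -> 0           (warned and assumed offline)
 */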
/*!
* \internal
* \brief Unpack node attribute for user-requested fencing
*
* \param[in] node Node to check
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry in CIB status
*
* \return \c true if fencing has been requested for \p node, otherwise \c false
*/
static bool
unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
{
long long value = 0LL;
int value_i = 0;
int rc = pcmk_rc_ok;
const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE,
NULL, pcmk__rsc_node_current);
// Value may be boolean or an epoch time
if (crm_str_to_boolean(value_s, &value_i) == 1) {
return (value_i != 0);
}
rc = pcmk__scan_ll(value_s, &value, 0LL);
if (rc == pcmk_rc_ok) {
return (value > 0);
}
crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
"node attribute for %s: %s",
value_s, pcmk__node_name(node), pcmk_rc_str(rc));
return false;
}
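/* Illustrative sketch (hypothetical values, not upstream code): values of the
 * terminate node attribute and what unpack_node_terminate() returns:
 *
 *     "true", "yes", "1" -> true   (boolean request to fence)
 *     "false", "no", "0" -> false
 *     "1700000000"       -> true   (positive epoch time counts as a request)
 *     "-1"               -> false  (parses, but not positive)
 *     "junk"             -> false  (warned and ignored)
 */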
static gboolean
determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
gboolean online = FALSE;
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
if (when_member <= 0) {
crm_trace("Node %s is %sdown", pcmk__node_name(this_node),
((when_member < 0)? "presumed " : ""));
} else if (when_online > 0) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
crm_debug("Node %s is not ready to run resources: %s",
pcmk__node_name(this_node), join);
}
} else if (!pcmk_is_set(this_node->priv->flags,
pcmk__node_expected_up)) {
crm_trace("Node %s controller is down: "
"member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
/*!
* \internal
* \brief Check whether a node has taken too long to join controller group
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node to check
* \param[in] when_member Epoch time when node became a cluster member
* \param[in] when_online Epoch time when node joined controller group
*
* \return true if node has been pending (on the way up) longer than
* \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false
* \note This will also update the cluster's recheck time if appropriate.
*/
static inline bool
pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
long long when_member, long long when_online)
{
if ((scheduler->priv->node_pending_ms > 0U)
&& (when_member > 0) && (when_online <= 0)) {
// There is a timeout on pending nodes, and node is pending
time_t timeout = when_member
+ pcmk__timeout_ms2s(scheduler->priv->node_pending_ms);
if (pcmk__scheduler_epoch_time(node->priv->scheduler) >= timeout) {
return true; // Node has timed out
}
// Node is pending, but still has time
pe__update_recheck_time(timeout, scheduler, "pending node timeout");
}
return false;
}
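/* Worked example (hypothetical numbers): with node_pending_ms = 120000
 * (a 120s PCMK_OPT_NODE_PENDING_TIMEOUT), a node with when_member =
 * 1700000000 and when_online <= 0 times out once the scheduler's effective
 * time reaches 1700000000 + 120 = 1700000120; before that, the recheck time
 * is updated so a new transition runs when the timeout would expire.
 */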
static bool
determine_online_status_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
bool termination_requested = unpack_node_terminate(this_node, node_state);
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
/*
- PCMK__XA_JOIN ::= member|down|pending|banned
- PCMK_XA_EXPECTED ::= member|down
@COMPAT with entries recorded for DCs < 2.1.7
- PCMK__XA_IN_CCM ::= true|false
- PCMK_XA_CRMD ::= online|offline
Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
- PCMK__XA_IN_CCM ::= <timestamp>|0
Time since when the node has been a cluster member. A value of 0 means the
node is not a cluster member.
- PCMK_XA_CRMD ::= <timestamp>|0
Time since when the peer has been online in CPG. A value of 0 means the peer
is offline in CPG.
*/
crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
(termination_requested? " (termination requested)" : ""));
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pcmk__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
return (when_online > 0);
}
if (when_member < 0) {
pe_fence_node(scheduler, this_node,
"peer has not been seen by the cluster", FALSE);
return false;
}
if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
pe_fence_node(scheduler, this_node,
"peer failed Pacemaker membership criteria", FALSE);
} else if (termination_requested) {
if ((when_member <= 0) && (when_online <= 0)
&& pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
crm_info("%s was fenced as requested", pcmk__node_name(this_node));
return false;
}
pe_fence_node(scheduler, this_node, "fencing was requested", false);
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
pcmk__str_null_matches)) {
if (pending_too_long(scheduler, this_node, when_member, when_online)) {
pe_fence_node(scheduler, this_node,
"peer pending timed out on joining the process group",
FALSE);
} else if ((when_member > 0) || (when_online > 0)) {
crm_info("- %s is not ready to run resources",
pcmk__node_name(this_node));
pcmk__set_node_flags(this_node, pcmk__node_standby);
this_node->details->pending = TRUE;
} else {
crm_trace("%s is down or still coming up",
pcmk__node_name(this_node));
}
} else if (when_member <= 0) {
// Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes
pe_fence_node(scheduler, this_node,
"peer is no longer part of the cluster", TRUE);
} else if (when_online <= 0) {
pe_fence_node(scheduler, this_node,
"peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
} else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
crm_info("%s is active", pcmk__node_name(this_node));
} else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources",
pcmk__node_name(this_node));
pcmk__set_node_flags(this_node, pcmk__node_standby);
this_node->details->pending = TRUE;
} else {
pe_fence_node(scheduler, this_node, "peer was in an unknown state",
FALSE);
}
return (when_member > 0);
}
static void
determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node)
{
pcmk_resource_t *rsc = this_node->priv->remote;
pcmk_resource_t *launcher = NULL;
pcmk_node_t *host = NULL;
const char *node_type = "Remote";
if (rsc == NULL) {
/* This is a leftover node state entry for a former Pacemaker Remote
* node whose connection resource was removed. Consider it offline.
*/
crm_trace("Pacemaker Remote node %s is considered OFFLINE because "
"its connection resource has been removed from the CIB",
this_node->priv->id);
this_node->details->online = FALSE;
return;
}
launcher = rsc->priv->launcher;
if (launcher != NULL) {
node_type = "Guest";
if (pcmk__list_of_1(rsc->priv->active_nodes)) {
host = rsc->priv->active_nodes->data;
}
}
/* If the resource is currently started, mark it online. */
if (rsc->priv->orig_role == pcmk_role_started) {
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
if ((rsc->priv->orig_role == pcmk_role_started)
&& (rsc->priv->next_role == pcmk_role_stopped)) {
crm_trace("%s node %s shutting down because connection resource is stopping",
node_type, this_node->priv->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
if ((launcher != NULL) && pcmk_is_set(launcher->flags, pcmk__rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->priv->id);
this_node->details->online = FALSE;
pcmk__set_node_flags(this_node, pcmk__node_remote_reset);
} else if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
node_type, this_node->priv->id);
this_node->details->online = FALSE;
} else if ((rsc->priv->orig_role == pcmk_role_stopped)
|| ((launcher != NULL)
&& (launcher->priv->orig_role == pcmk_role_stopped))) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
node_type, this_node->priv->id);
this_node->details->online = FALSE;
pcmk__clear_node_flags(this_node, pcmk__node_remote_reset);
} else if (host && (host->details->online == FALSE)
&& host->details->unclean) {
crm_trace("Guest node %s UNCLEAN because host is unclean",
this_node->priv->id);
this_node->details->online = FALSE;
pcmk__set_node_flags(this_node, pcmk__node_remote_reset);
} else {
crm_trace("%s node %s is %s",
node_type, this_node->priv->id,
this_node->details->online? "ONLINE" : "OFFLINE");
}
}
static void
determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler)
{
gboolean online = FALSE;
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
CRM_CHECK(this_node != NULL, return);
this_node->details->shutdown = FALSE;
if (pe__shutdown_requested(this_node)) {
this_node->details->shutdown = TRUE;
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
pcmk__set_node_flags(this_node, pcmk__node_expected_up);
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
online = determine_online_status_no_fencing(scheduler, node_state,
this_node);
} else {
online = determine_online_status_fencing(scheduler, node_state,
this_node);
}
if (online) {
this_node->details->online = TRUE;
} else {
/* remove node from contention */
this_node->assign->score = -PCMK_SCORE_INFINITY;
}
if (online && this_node->details->shutdown) {
/* don't run resources here */
this_node->assign->score = -PCMK_SCORE_INFINITY;
}
if (this_node->details->unclean) {
pcmk__sched_warn(scheduler, "%s is unclean",
pcmk__node_name(this_node));
} else if (!this_node->details->online) {
crm_trace("%s is offline", pcmk__node_name(this_node));
} else if (this_node->details->shutdown) {
crm_info("%s is shutting down", pcmk__node_name(this_node));
} else if (this_node->details->pending) {
crm_info("%s is pending", pcmk__node_name(this_node));
} else if (pcmk_is_set(this_node->priv->flags, pcmk__node_standby)) {
crm_info("%s is in standby", pcmk__node_name(this_node));
} else if (this_node->details->maintenance) {
crm_info("%s is in maintenance", pcmk__node_name(this_node));
} else {
crm_info("%s is online", pcmk__node_name(this_node));
}
}
/*!
* \internal
* \brief Find the end of a resource's name, excluding any clone suffix
*
* \param[in] id Resource ID to check
*
* \return Pointer to last character of resource's base name
*/
const char *
pe_base_name_end(const char *id)
{
if (!pcmk__str_empty(id)) {
const char *end = id + strlen(id) - 1;
for (const char *s = end; s > id; --s) {
switch (*s) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
break;
case ':':
return (s == end)? s : (s - 1);
default:
return end;
}
}
return end;
}
return NULL;
}
/*!
* \internal
* \brief Get a resource name excluding any clone suffix
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_strip(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
char *basename = NULL;
pcmk__assert(end != NULL);
basename = strndup(last_rsc_id, end - last_rsc_id + 1);
pcmk__assert(basename != NULL);
return basename;
}
/*!
* \internal
* \brief Get the name of the first instance of a cloned resource
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name plus :0
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_zero(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
size_t base_name_len = end - last_rsc_id + 1;
char *zero = NULL;
pcmk__assert(end != NULL);
zero = pcmk__assert_alloc(base_name_len + 3, sizeof(char));
memcpy(zero, last_rsc_id, base_name_len);
zero[base_name_len] = ':';
zero[base_name_len + 1] = '0';
return zero;
}
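/* Usage sketch (hypothetical IDs, not part of the upstream code):
 *
 *     char *base = clone_strip("myclone:2");   // -> "myclone"
 *     char *zero = clone_zero("myclone:2");    // -> "myclone:0"
 *     char *name = clone_strip("myrsc");       // -> "myrsc" (no suffix)
 *
 *     free(base);
 *     free(zero);
 *     free(name);
 */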
static pcmk_resource_t *
create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
xmlNode *xml_rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE);
pcmk__xe_copy_attrs(xml_rsc, rsc_entry, pcmk__xaf_none);
crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
pcmk_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
node = pcmk_find_node(scheduler, rsc_id);
if (node == NULL) {
node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, 0,
scheduler);
}
link_rsc2remotenode(scheduler, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
node->details->shutdown = TRUE;
}
}
if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) {
// This removed resource needs to be mapped to a launcher
crm_trace("Launched resource %s was removed from the configuration",
rsc_id);
pcmk__set_rsc_flags(rsc, pcmk__rsc_removed_launched);
}
pcmk__set_rsc_flags(rsc, pcmk__rsc_removed);
scheduler->priv->resources = g_list_append(scheduler->priv->resources, rsc);
return rsc;
}
/*!
* \internal
* \brief Create orphan instance for anonymous clone resource history
*
* \param[in,out] parent Clone resource that orphan will be added to
* \param[in] rsc_id Orphan's resource ID
* \param[in] node Where orphan is active (for logging only)
* \param[in,out] scheduler Scheduler data
*
* \return Newly added orphaned instance of \p parent
*/
static pcmk_resource_t *
create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
pcmk_resource_t *orphan = NULL;
// find_rsc() because we might be a cloned group
orphan = top->priv->fns->find_rsc(top, rsc_id, NULL,
pcmk_rsc_match_clone_only);
pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pcmk__node_name(node));
return orphan;
}
/*!
* \internal
* \brief Check a node for an instance of an anonymous clone
*
* Return a child instance of the specified anonymous clone, in order of
* preference: (1) the instance running on the specified node, if any;
* (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX
* instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX
* instances are already active).
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node on which to check for instance
* \param[in,out] parent Clone to check
* \param[in] rsc_id Name of cloned resource in history (no instance)
*
* \return An instance of \p parent appropriate for the history entry
* (possibly a newly created orphan)
*/
static pcmk_resource_t *
find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
pcmk_resource_t *parent, const char *rsc_id)
{
GList *rIter = NULL;
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
pcmk__assert(pcmk__is_anonymous_clone(parent));
// Check for active (or partially active, for cloned groups) instance
pcmk__rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pcmk__node_name(node), parent->id);
for (rIter = parent->priv->children;
(rIter != NULL) && (rsc == NULL); rIter = rIter->next) {
GList *locations = NULL;
pcmk_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
* for a resource before the resource's individual operation history
* entries are unpacked, locations will generally not contain the
* desired node.
*
* However, there are three exceptions:
* (1) when child is a cloned group and we have already unpacked the
* history of another member of the group on the same node;
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if
* PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and
* (3) when we re-run calculations on the same scheduler data as part of
* a simulation.
*/
child->priv->fns->location(child, &locations, pcmk__rsc_node_current
|pcmk__rsc_node_pending);
if (locations) {
/* We should never associate the same numbered anonymous clone
* instance with multiple nodes, and clone instances can't migrate,
* so there must be only one location, regardless of history.
*/
CRM_LOG_ASSERT(locations->next == NULL);
if (pcmk__same_node((pcmk_node_t *) locations->data, node)) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
* need the particular member corresponding to rsc_id.
*
* If the history entry is orphaned, rsc will be NULL.
*/
rsc = parent->priv->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
* happen if PCMK_META_GLOBALLY_UNIQUE is switched from true
* to false), we want to consider the instances beyond the
* first as orphans, even if there are inactive instance
* numbers available.
*/
if (rsc->priv->active_nodes != NULL) {
crm_notice("Active (now-)anonymous clone %s has "
"multiple (orphan) instance histories on %s",
parent->id, pcmk__node_name(node));
skip_inactive = TRUE;
rsc = NULL;
} else {
pcmk__rsc_trace(parent, "Resource %s, active", rsc->id);
}
}
}
g_list_free(locations);
} else {
pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
&& !pcmk_is_set(child->flags, pcmk__rsc_blocked)) {
// Remember one inactive instance in case we don't find active
inactive_instance =
parent->priv->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
/* ... but don't use it if it was already associated with a
* pending action on another node
*/
if (inactive_instance != NULL) {
const pcmk_node_t *pending_node = NULL;
pending_node = inactive_instance->priv->pending_node;
if ((pending_node != NULL)
&& !pcmk__same_node(pending_node, node)) {
inactive_instance = NULL;
}
}
}
}
}
if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
pcmk__rsc_trace(parent, "Resource %s, empty slot",
inactive_instance->id);
rsc = inactive_instance;
}
/* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or
* PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we
* don't want to consume a valid instance number for unclean nodes. Such
* instances may appear to be active according to the history, but should be
* considered inactive, so we can start an instance elsewhere. Treat such
* instances as orphans.
*
* An exception is instances running on guest nodes -- since guest node
* "fencing" is actually just a resource stop, requires shouldn't apply.
*
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pcmk__is_guest_or_bundle_node(node)
&& !pe__is_universal_clone(parent, scheduler)) {
rsc = NULL;
}
if (rsc == NULL) {
rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
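/* Illustrative scenario (hypothetical names): for an anonymous clone
 * "myclone" with PCMK_META_CLONE_MAX=2, a history entry for "myclone" on
 * node1 resolves, in order of preference, to: the instance already known
 * active or pending on node1; otherwise an inactive instance such as
 * "myclone:1"; otherwise a newly created orphan "myclone:2".
 */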
static pcmk_resource_t *
unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
const char *rsc_id)
{
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
rsc = pe_find_resource(scheduler->priv->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
* check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0,
* we create a single :0 orphan to match against here.
*/
char *clone0_id = clone_zero(rsc_id);
pcmk_resource_t *clone0 = pe_find_resource(scheduler->priv->resources,
clone0_id);
if (clone0 && !pcmk_is_set(clone0->flags, pcmk__rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
} else {
crm_trace("%s is not known as %s either (orphan)",
rsc_id, clone0_id);
}
free(clone0_id);
} else if (rsc->priv->variant > pcmk__rsc_variant_primitive) {
crm_trace("Resource history for %s is orphaned "
"because it is no longer primitive", rsc_id);
return NULL;
} else {
parent = uber_parent(rsc);
}
if (pcmk__is_anonymous_clone(parent)) {
if (pcmk__is_bundled(parent)) {
rsc = pe__find_bundle_replica(parent->priv->parent, node);
} else {
char *base = clone_strip(rsc_id);
rsc = find_anonymous_clone(scheduler, node, parent, base);
free(base);
pcmk__assert(rsc != NULL);
}
}
if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_none)
&& !pcmk__str_eq(rsc_id, rsc->priv->history_id, pcmk__str_none)) {
pcmk__str_update(&(rsc->priv->history_id), rsc_id);
pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pcmk__node_name(node), rsc->id,
pcmk_is_set(rsc->flags, pcmk__rsc_removed)? " (ORPHAN)" : "");
}
return rsc;
}
static pcmk_resource_t *
process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
crm_debug("Detected orphan resource %s on %s",
rsc_id, pcmk__node_name(node));
rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
if (rsc == NULL) {
return NULL;
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) {
pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id);
resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
"__orphan_do_not_run__", scheduler);
}
return rsc;
}
static void
process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
enum pcmk__on_fail on_fail)
{
pcmk_node_t *tmpnode = NULL;
char *reason = NULL;
enum pcmk__on_fail save_on_fail = pcmk__on_fail_ignore;
pcmk_scheduler_t *scheduler = NULL;
bool known_active = false;
pcmk__assert(rsc != NULL);
scheduler = rsc->priv->scheduler;
known_active = (rsc->priv->orig_role > pcmk_role_stopped);
pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
rsc->id, pcmk_role_text(rsc->priv->orig_role),
pcmk__node_name(node), pcmk__on_fail_text(on_fail));
/* process current state */
if (rsc->priv->orig_role != pcmk_role_unknown) {
pcmk_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->priv->probed_nodes,
node->priv->id) == NULL) {
pcmk_node_t *n = pe__copy_node(node);
pcmk__rsc_trace(rsc, "%s (%s in history) known on %s",
rsc->id,
pcmk__s(rsc->priv->history_id, "the same"),
pcmk__node_name(n));
g_hash_table_insert(iter->priv->probed_nodes,
(gpointer) n->priv->id, n);
}
if (pcmk_is_set(iter->flags, pcmk__rsc_unique)) {
break;
}
iter = iter->priv->parent;
}
}
/* If a managed resource is believed to be running, but node is down ... */
if (known_active && !node->details->online && !node->details->maintenance
&& pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
gboolean should_fence = FALSE;
/* If this is a guest node, fence it (regardless of whether fencing is
* enabled, because guest node fencing is done by recovery of the
* container resource rather than by the fencer). Mark the resource
* we're processing as failed. When the guest comes back up, its
* operation history in the CIB will be cleared, freeing the affected
* resource to run again once we are sure we know its state.
*/
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
should_fence = TRUE;
} else if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
if (pcmk__is_remote_node(node)
&& (node->priv->remote != NULL)
&& !pcmk_is_set(node->priv->remote->flags,
pcmk__rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
* somewhere. This allows connection resources on a failed
* cluster node to move to another node without requiring the
* remote nodes to be fenced as well.
*/
pcmk__clear_node_flags(node, pcmk__node_seen);
reason = crm_strdup_printf("%s is active there (fencing will be"
" revoked if remote connection can "
"be re-established elsewhere)",
rsc->id);
}
should_fence = TRUE;
}
if (should_fence) {
if (reason == NULL) {
reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
}
pe_fence_node(scheduler, node, reason, FALSE);
}
free(reason);
}
/* In order to calculate priority_fencing_delay correctly, save the failure
* information and pass it to native_add_running().
*/
save_on_fail = on_fail;
if (node->details->unclean) {
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
on_fail = pcmk__on_fail_ignore;
}
switch (on_fail) {
case pcmk__on_fail_ignore:
/* nothing to do */
break;
case pcmk__on_fail_demote:
pcmk__set_rsc_flags(rsc, pcmk__rsc_failed);
demote_action(rsc, node, FALSE);
break;
case pcmk__on_fail_fence_node:
/* treat it as if it is still running
* but also mark the node as unclean
*/
reason = crm_strdup_printf("%s failed there", rsc->id);
pe_fence_node(scheduler, node, reason, FALSE);
free(reason);
break;
case pcmk__on_fail_standby_node:
pcmk__set_node_flags(node,
pcmk__node_standby|pcmk__node_fail_standby);
break;
case pcmk__on_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk__rsc_blocked);
break;
case pcmk__on_fail_ban:
/* make sure it comes up somewhere else
* or not at all
*/
resource_location(rsc, node, -PCMK_SCORE_INFINITY,
"__action_migration_auto__", scheduler);
break;
case pcmk__on_fail_stop:
pe__set_next_role(rsc, pcmk_role_stopped,
PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP);
break;
case pcmk__on_fail_restart:
if (known_active) {
pcmk__set_rsc_flags(rsc,
pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
stop_action(rsc, node, FALSE);
}
break;
case pcmk__on_fail_restart_container:
pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
if ((rsc->priv->launcher != NULL) && pcmk__is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
* container is running yet, so remember it and add a stop
* action for it later.
*/
scheduler->priv->stop_needed =
g_list_prepend(scheduler->priv->stop_needed,
rsc->priv->launcher);
} else if (rsc->priv->launcher != NULL) {
stop_action(rsc->priv->launcher, node, FALSE);
} else if (known_active) {
stop_action(rsc, node, FALSE);
}
break;
case pcmk__on_fail_reset_remote:
pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
tmpnode = NULL;
if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
tmpnode = pcmk_find_node(scheduler, rsc->id);
}
if (pcmk__is_remote_node(tmpnode)
&& !pcmk_is_set(tmpnode->priv->flags,
pcmk__node_remote_fenced)) {
/* The remote connection resource failed in a way that
* should result in fencing the remote node.
*/
pe_fence_node(scheduler, tmpnode,
"remote connection is unrecoverable", FALSE);
}
}
/* Require the stop action regardless of whether fencing is occurring. */
if (known_active) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->priv->remote_reconnect_ms > 0U) {
pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
}
break;
}
/* Ensure a remote connection failure forces an unclean Pacemaker Remote
* node to be fenced. By marking the node as seen, the failure will result
* in a fencing operation regardless of whether we're going to attempt to
* reconnect in this transition.
*/
if (pcmk_all_flags_set(rsc->flags,
pcmk__rsc_failed|pcmk__rsc_is_remote_connection)) {
tmpnode = pcmk_find_node(scheduler, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
pcmk__set_node_flags(tmpnode, pcmk__node_seen);
}
}
if (known_active) {
if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
crm_notice("Removed resource %s is active on %s and will be "
"stopped when possible",
rsc->id, pcmk__node_name(node));
} else {
crm_notice("Removed resource %s must be stopped manually on %s "
"because " PCMK_OPT_STOP_ORPHAN_RESOURCES
" is set to false", rsc->id, pcmk__node_name(node));
}
}
native_add_running(rsc, node, scheduler,
(save_on_fail != pcmk__on_fail_ignore));
switch (on_fail) {
case pcmk__on_fail_ignore:
break;
case pcmk__on_fail_demote:
case pcmk__on_fail_block:
pcmk__set_rsc_flags(rsc, pcmk__rsc_failed);
break;
default:
pcmk__set_rsc_flags(rsc,
pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
break;
}
} else if ((rsc->priv->history_id != NULL)
&& (strchr(rsc->priv->history_id, ':') != NULL)) {
/* Only do this for older status sections that included instance numbers.
* Otherwise stopped instances will appear as orphans.
*/
pcmk__rsc_trace(rsc, "Clearing history ID %s for %s (stopped)",
rsc->priv->history_id, rsc->id);
free(rsc->priv->history_id);
rsc->priv->history_id = NULL;
} else {
GList *possible_matches = pe__resource_actions(rsc, node,
PCMK_ACTION_STOP, FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
pcmk__set_action_flags(stop, pcmk__action_optional);
}
g_list_free(possible_matches);
}
/* A successful stop after migrate_to on the migration source doesn't mean
* the partially migrated resource is stopped on the migration target.
*/
if ((rsc->priv->orig_role == pcmk_role_stopped)
&& (rsc->priv->active_nodes != NULL)
&& (rsc->priv->partial_migration_target != NULL)
&& pcmk__same_node(rsc->priv->partial_migration_source, node)) {
rsc->priv->orig_role = pcmk_role_started;
}
}
/* create active recurring operations as optional */
static void
process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
int start_index, int stop_index,
GList *sorted_op_list, pcmk_scheduler_t *scheduler)
{
int counter = -1;
const char *task = NULL;
const char *status = NULL;
GList *gIter = sorted_op_list;
pcmk__assert(rsc != NULL);
pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d",
rsc->id, start_index, stop_index);
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
guint interval_ms = 0;
char *key = NULL;
const char *id = pcmk__xe_id(rsc_op);
counter++;
if (node->details->online == FALSE) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline",
rsc->id, pcmk__node_name(node));
break;
/* Need to check if there's a monitor for role="Stopped" */
} else if (start_index < stop_index && counter <= stop_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active",
id, pcmk__node_name(node));
continue;
} else if (counter < start_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d",
id, pcmk__node_name(node), counter);
continue;
}
crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
if (interval_ms == 0) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring",
id, pcmk__node_name(node));
continue;
}
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: status",
id, pcmk__node_name(node));
continue;
}
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node));
custom_action(rsc, key, task, node, TRUE, scheduler);
}
}
void
calculate_active_ops(const GList *sorted_op_list, int *start_index,
int *stop_index)
{
int counter = -1;
int implied_monitor_start = -1;
int implied_clone_start = -1;
const char *task = NULL;
const char *status = NULL;
*stop_index = -1;
*start_index = -1;
for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
const xmlNode *rsc_op = (const xmlNode *) iter->data;
counter++;
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
*start_index = counter;
} else if ((implied_monitor_start <= *stop_index)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
PCMK_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
if (*start_index == -1) {
if (implied_clone_start != -1) {
*start_index = implied_clone_start;
} else if (implied_monitor_start != -1) {
*start_index = implied_monitor_start;
}
}
}
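/* Worked example (hypothetical history): given a sorted operation list
 *
 *     [0] start   (rc=0)
 *     [1] monitor (rc=0)
 *     [2] stop    (rc=0)
 *     [3] start   (rc=0)
 *     [4] monitor (rc=8)
 *
 * calculate_active_ops() sets *stop_index = 2 and *start_index = 3. The
 * monitor at [4] would be used as an implied start only if there were no
 * explicit start and no promote/demote in the history.
 */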
// If resource history entry has shutdown lock, remember lock node and time
static void
unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
if ((scheduler->priv->shutdown_lock_ms > 0U)
&& (pcmk__scheduler_epoch_time(scheduler)
> (lock_time + pcmk__timeout_ms2s(scheduler->priv->shutdown_lock_ms)))) {
pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pcmk__node_name(node));
pe__clear_resource_history(rsc, node);
} else {
rsc->priv->lock_node = node;
rsc->priv->lock_time = lock_time;
}
}
}
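/* Worked example (hypothetical numbers): with shutdown_lock_ms = 600000
 * (a 600s lock limit) and lock_time = 1700000000, the lock expires once the
 * scheduler's effective time exceeds 1700000000 + 600 = 1700000600, at which
 * point the resource's history on the node is cleared; until then the
 * resource stays locked to the node.
 */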
/*!
* \internal
* \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] lrm_resource \c PCMK__XE_LRM_RESOURCE XML being unpacked
* \param[in,out] scheduler Scheduler data
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
static pcmk_resource_t *
unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
pcmk_scheduler_t *scheduler)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
enum rsc_role_e req_role = pcmk_role_unknown;
const char *rsc_id = pcmk__xe_id(lrm_resource);
pcmk_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
enum pcmk__on_fail on_fail = pcmk__on_fail_ignore;
enum rsc_role_e saved_role = pcmk_role_unknown;
if (rsc_id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE
" entry: No " PCMK_XA_ID);
crm_log_xml_info(lrm_resource, "missing-id");
return NULL;
}
crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s",
rsc_id, pcmk__node_name(node));
/* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort
* them
*/
for (rsc_op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP, NULL,
NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) {
op_list = g_list_prepend(op_list, rsc_op);
}
if (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
}
}
/* find the resource */
rsc = unpack_find_resource(scheduler, node, rsc_id);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
rsc = process_orphan_resource(lrm_resource, node, scheduler);
}
}
pcmk__assert(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
if (pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
}
/* process operations */
saved_role = rsc->priv->orig_role;
rsc->priv->orig_role = pcmk_role_unknown;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail);
}
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
scheduler);
/* no need to free the contents */
g_list_free(sorted_op_list);
process_rsc_state(rsc, node, on_fail);
if (get_target_role(rsc, &req_role)) {
if ((rsc->priv->next_role == pcmk_role_unknown)
|| (req_role < rsc->priv->next_role)) {
pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE);
} else if (req_role > rsc->priv->next_role) {
pcmk__rsc_info(rsc,
"%s: Not overwriting calculated next role %s"
" with requested next role %s",
rsc->id, pcmk_role_text(rsc->priv->next_role),
pcmk_role_text(req_role));
}
}
if (saved_role > rsc->priv->orig_role) {
rsc->priv->orig_role = saved_role;
}
return rsc;
}
static void
handle_removed_launched_resources(const xmlNode *lrm_rsc_list,
pcmk_scheduler_t *scheduler)
{
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL;
rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
pcmk_resource_t *rsc;
pcmk_resource_t *launcher = NULL;
const char *rsc_id;
const char *launcher_id = NULL;
launcher_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER);
rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
if ((launcher_id == NULL) || (rsc_id == NULL)) {
continue;
}
launcher = pe_find_resource(scheduler->priv->resources, launcher_id);
if (launcher == NULL) {
continue;
}
rsc = pe_find_resource(scheduler->priv->resources, rsc_id);
if ((rsc == NULL) || (rsc->priv->launcher != NULL)
|| !pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) {
continue;
}
pcmk__rsc_trace(rsc, "Mapped launcher of removed resource %s to %s",
rsc->id, launcher_id);
rsc->priv->launcher = launcher;
launcher->priv->launched = g_list_append(launcher->priv->launched,
rsc);
}
}
/*!
* \internal
* \brief Unpack one node's lrm status section
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] xml CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler)
{
bool found_removed_launched_resource = false;
// Drill down to PCMK__XE_LRM_RESOURCES section
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM, NULL, NULL);
if (xml == NULL) {
return;
}
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL);
if (xml == NULL) {
return;
}
// Unpack each PCMK__XE_LRM_RESOURCE entry
for (const xmlNode *rsc_entry = pcmk__xe_first_child(xml,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL;
rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
if ((rsc != NULL)
&& pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) {
found_removed_launched_resource = true;
}
}
/* Now that all resource state has been unpacked for this node, map any
* removed launched resources to their launchers.
*/
if (found_removed_launched_resource) {
handle_removed_launched_resources(xml, scheduler);
}
}
static void
set_active(pcmk_resource_t *rsc)
{
const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
if (top && pcmk_is_set(top->flags, pcmk__rsc_promotable)) {
rsc->priv->orig_role = pcmk_role_unpromoted;
} else {
rsc->priv->orig_role = pcmk_role_started;
}
}
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
pcmk_node_t *node = value;
int *score = user_data;
node->assign->score = *score;
}
#define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE
#define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM \
"/" PCMK__XE_LRM_RESOURCES \
"/" PCMK__XE_LRM_RESOURCE
#define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
int target_rc, pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL),
return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']"
SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'",
NULL);
/* Need to check against transition_magic too? */
if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_TARGET "='", source, "']",
NULL);
} else if ((source != NULL)
&& (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_SOURCE "='", source, "']",
NULL);
} else {
g_string_append_c(xpath, ']');
}
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
if (xml && target_rc >= 0) {
int rc = PCMK_OCF_UNKNOWN_ERROR;
int status = PCMK_EXEC_ERROR;
crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc);
crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status);
if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) {
return NULL;
}
}
return xml;
}
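/* Illustrative sketch (hypothetical values): for resource "myrsc", operation
 * "monitor", node "node1", and source NULL, find_lrm_op() builds an XPath
 * equivalent to (wrapped here for readability)
 *
 *     /cib/status/node_state[@uname='node1']
 *         /lrm/lrm_resources/lrm_resource[@id='myrsc']
 *         /lrm_rsc_op[@operation='monitor']
 *
 * and, when target_rc >= 0, returns NULL unless the matched entry completed
 * (op-status PCMK_EXEC_DONE) with exactly that rc-code.
 */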
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']",
NULL);
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
return xml;
}
/*!
* \internal
* \brief Check whether a resource has no completed action history on a node
*
* \param[in,out] rsc Resource to check
* \param[in] node_name Node to check
*
* \return true if \p rsc is unknown on \p node_name, otherwise false
*/
static bool
unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
{
bool result = false;
xmlXPathObjectPtr search;
char *xpath = NULL;
xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"
SUB_XPATH_LRM_RSC_OP
"[@" PCMK__XA_RC_CODE "!='%d']",
node_name, rsc->id, PCMK_OCF_UNKNOWN);
search = xpath_search(rsc->priv->scheduler->input, xpath);
result = (numXpathResults(search) == 0);
freeXpathObject(search);
free(xpath);
return result;
}
/*!
* \internal
* \brief Check whether a probe/monitor indicating the resource was not running
* on a node happened after some event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that monitor is being compared to
* \param[in,out] scheduler Scheduler data
*
* \return true if such a monitor happened after the event, false otherwise
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, pcmk_scheduler_t *scheduler)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
NULL, PCMK_OCF_NOT_RUNNING, scheduler);
return (monitor != NULL) && (pe__is_newer_op(monitor, xml_op) > 0);
}
/*!
* \internal
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that non-monitor is being compared to
* \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after the event, otherwise false
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, pcmk_scheduler_t *scheduler)
{
xmlNode *lrm_resource = NULL;
lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
if (lrm_resource == NULL) {
return false;
}
for (xmlNode *op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP,
NULL, NULL);
op != NULL; op = pcmk__xe_next(op, PCMK__XE_LRM_RSC_OP)) {
const char *task = NULL;
if (op == xml_op) {
continue;
}
task = crm_element_value(op, PCMK_XA_OPERATION);
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
NULL)
&& pe__is_newer_op(op, xml_op) > 0) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] migrate_to Any migrate_to event being compared to
* \param[in] migrate_from Any migrate_from event being compared to
* \param[in,out] scheduler Scheduler data
*
* \return true if the resource has newer state on the node after the
* migration events, otherwise false
*/
static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
const xmlNode *migrate_to,
const xmlNode *migrate_from,
pcmk_scheduler_t *scheduler)
{
const xmlNode *xml_op = (migrate_from != NULL)? migrate_from : migrate_to;
const char *source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE);
/* Prefer comparing to the migration event on the same node if one exists,
* since call IDs are more reliable.
*/
if ((xml_op != migrate_to) && (migrate_to != NULL)
&& pcmk__str_eq(node_name, source, pcmk__str_casei)) {
xml_op = migrate_to;
}
/* If there's any newer non-monitor operation on the node, or any newer
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
return non_monitor_after(rsc_id, node_name, xml_op, scheduler)
|| monitor_not_running_after(rsc_id, node_name, xml_op, scheduler);
}
/*!
* \internal
* \brief Parse migration source and target node names from history entry
*
* \param[in] entry Resource history entry for a migration action
* \param[in] source_node If not NULL, source must match this node
* \param[in] target_node If not NULL, target must match this node
* \param[out] source_name Where to store migration source node name
* \param[out] target_name Where to store migration target node name
*
* \return Standard Pacemaker return code
*/
static int
get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
const pcmk_node_t *target_node,
const char **source_name, const char **target_name)
{
*source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE);
*target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET);
if ((*source_name == NULL) || (*target_name == NULL)) {
pcmk__config_err("Ignoring resource history entry %s without "
PCMK__META_MIGRATE_SOURCE " and "
PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry));
return pcmk_rc_unpack_error;
}
if ((source_node != NULL)
&& !pcmk__str_eq(*source_name, source_node->priv->name,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_SOURCE "='%s' does not match %s",
pcmk__xe_id(entry), *source_name,
pcmk__node_name(source_node));
return pcmk_rc_unpack_error;
}
if ((target_node != NULL)
&& !pcmk__str_eq(*target_name, target_node->priv->name,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_TARGET "='%s' does not match %s",
pcmk__xe_id(entry), *target_name,
pcmk__node_name(target_node));
return pcmk_rc_unpack_error;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Add a migration source to a resource's list of dangling migrations
*
* If the migrate_to and migrate_from actions in a live migration both
* succeeded, but there is no stop on the source, the migration is considered
* "dangling." Add the source to the resource's dangling migration list, which
* will be used to schedule a stop on the source without affecting the target.
*
* \param[in,out] rsc Resource involved in migration
* \param[in] node Migration source
*/
static void
add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
rsc->id, pcmk__node_name(node));
rsc->priv->orig_role = pcmk_role_stopped;
rsc->priv->dangling_migration_sources =
g_list_prepend(rsc->priv->dangling_migration_sources,
(gpointer) node);
}
/*!
* \internal
* \brief Update resource role etc. after a successful migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_success(struct action_history *history)
{
/* A complete migration sequence is:
* 1. migrate_to on source node (which succeeded if we get to this function)
* 2. migrate_from on target node
* 3. stop on source node
*
* If no migrate_from has happened, the migration is considered to be
* "partial". If the migrate_from succeeded but no stop has happened, the
* migration is considered to be "dangling".
*
* If a successful migrate_to and stop have happened on the source node, we
* still need to check for a partial migration, due to scenarios (easier to
* produce with batch-limit=1) like:
*
* - A resource is migrating from node1 to node2, and a migrate_to is
* initiated for it on node1.
*
* - node2 goes into standby mode while the migrate_to is pending, which
* aborts the transition.
*
* - Upon completion of the migrate_to, a new transition schedules a stop
* on both nodes and a start on node1.
*
* - If the new transition is aborted for any reason while the resource is
* stopping on node1, the transition after that stop completes will see
* the migrate_to and stop on the source, but it's still a partial
* migration, and the resource must be stopped on node2 because it is
* potentially active there due to the migrate_to.
*
* We also need to take into account that either node's history may be
* cleared at any point in the migration process.
*/
int from_rc = PCMK_OCF_OK;
int from_status = PCMK_EXEC_PENDING;
pcmk_node_t *target_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
bool source_newer_op = false;
bool target_newer_state = false;
bool active_on_target = false;
pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
// Check for newer state on the source
source_newer_op = non_monitor_after(history->rsc->id, source, history->xml,
scheduler);
// Check for a migrate_from action from this source on the target
migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
target, source, -1, scheduler);
if (migrate_from != NULL) {
if (source_newer_op) {
/* There's a newer non-monitor operation on the source and a
* migrate_from on the target, so this migrate_to is irrelevant to
* the resource's state.
*/
return;
}
crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc);
crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status);
}
/* If the resource has newer state on both the source and target after the
* migration events, this migrate_to is irrelevant to the resource's state.
*/
target_newer_state = newer_state_after_migrate(history->rsc->id, target,
history->xml, migrate_from,
scheduler);
if (source_newer_op && target_newer_state) {
return;
}
/* Check for dangling migration (migrate_from succeeded but stop not done).
* We know there's no stop because we already returned if the target has a
* migrate_from and the source has any newer non-monitor operation.
*/
if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) {
add_dangling_migration(history->rsc, history->node);
return;
}
/* Without newer state, this migrate_to implies the resource is active.
* (Clones are not allowed to migrate, so role can't be promoted.)
*/
history->rsc->priv->orig_role = pcmk_role_started;
target_node = pcmk_find_node(scheduler, target);
active_on_target = !target_newer_state && (target_node != NULL)
&& target_node->details->online;
if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
if (active_on_target) {
native_add_running(history->rsc, target_node, scheduler, TRUE);
} else {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable);
}
return;
}
// The migrate_from is pending, complete but erased, or to be scheduled
/* If there is no history at all for the resource on an online target, then
* it was likely cleaned. Just return, and we'll schedule a probe. Once we
* have the probe result, it will be reflected in target_newer_state.
*/
if ((target_node != NULL) && target_node->details->online
&& unknown_on_node(history->rsc, target)) {
return;
}
if (active_on_target) {
pcmk_node_t *source_node = pcmk_find_node(scheduler, source);
native_add_running(history->rsc, target_node, scheduler, FALSE);
if ((source_node != NULL) && source_node->details->online) {
/* This is a partial migration: the migrate_to completed
* successfully on the source, but the migrate_from has not
* completed. Remember the source and target; if the newly
* chosen target remains the same when we schedule actions
* later, we may continue with the migration.
*/
history->rsc->priv->partial_migration_target = target_node;
history->rsc->priv->partial_migration_source = source_node;
}
} else if (!source_newer_op) {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable);
}
}
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_failure(struct action_history *history)
{
xmlNode *target_migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->priv->orig_role = pcmk_role_started;
// Check for migrate_from on the target
target_migrate_from = find_lrm_op(history->rsc->id,
PCMK_ACTION_MIGRATE_FROM, target, source,
PCMK_OCF_OK, scheduler);
if (/* If the resource state is unknown on the target, it will likely be
* probed there. Don't just assume it's running there; if the probe
* finds it running, we will end up back here anyway.
*/
!unknown_on_node(history->rsc, target)
/* If the resource has newer state on the target after the migration
* events, this migrate_to no longer matters for the target.
*/
&& !newer_state_after_migrate(history->rsc->id, target, history->xml,
target_migrate_from, scheduler)) {
/* The resource has no newer state on the target, so assume it's still
* active there (if it is up).
*/
pcmk_node_t *target_node = pcmk_find_node(scheduler, target);
if (target_node && target_node->details->online) {
native_add_running(history->rsc, target_node, scheduler, FALSE);
}
} else if (!non_monitor_after(history->rsc->id, source, history->xml,
scheduler)) {
/* We know the resource has newer state on the target, but this
* migrate_to still matters for the source as long as there's no newer
* non-monitor operation there.
*/
// Mark node as having dangling migration so we can force a stop later
history->rsc->priv->dangling_migration_sources =
g_list_prepend(history->rsc->priv->dangling_migration_sources,
(gpointer) history->node);
}
}
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_from action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_from_failure(struct action_history *history)
{
xmlNode *source_migrate_to = NULL;
const char *source = NULL;
const char *target = NULL;
pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, NULL, history->node, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->priv->orig_role = pcmk_role_started;
// Check for a migrate_to on the source
source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
source, target, PCMK_OCF_OK, scheduler);
if (/* If the resource state is unknown on the source, it will likely be
* probed there. Don't just assume it's running there; if the probe
* finds it running, we will end up back here anyway.
*/
!unknown_on_node(history->rsc, source)
/* If the resource has newer state on the source after the migration
* events, this migrate_from no longer matters for the source.
*/
&& !newer_state_after_migrate(history->rsc->id, source,
source_migrate_to, history->xml,
scheduler)) {
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
pcmk_node_t *source_node = pcmk_find_node(scheduler, source);
if (source_node && source_node->details->online) {
native_add_running(history->rsc, source_node, scheduler, TRUE);
}
}
}
/*!
* \internal
* \brief Add an action to cluster's list of failed actions
*
* \param[in,out] history Parsed action result history
*/
static void
record_failed_op(struct action_history *history)
{
const pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler;
if (!(history->node->details->online)) {
return;
}
for (const xmlNode *xIter = scheduler->priv->failed->children;
xIter != NULL; xIter = xIter->next) {
const char *key = pcmk__xe_history_key(xIter);
const char *uname = crm_element_value(xIter, PCMK_XA_UNAME);
if (pcmk__str_eq(history->key, key, pcmk__str_none)
&& pcmk__str_eq(uname, history->node->priv->name,
pcmk__str_casei)) {
crm_trace("Skipping duplicate entry %s on %s",
history->key, pcmk__node_name(history->node));
return;
}
}
crm_trace("Adding entry for %s on %s to failed action list",
history->key, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->priv->name);
crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id);
pcmk__xml_copy(scheduler->priv->failed, history->xml);
}
static char *
last_change_str(const xmlNode *xml_op)
{
time_t when;
char *result = NULL;
if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE,
&when) == pcmk_ok) {
char *when_s = pcmk__epoch2str(&when, 0);
const char *p = strchr(when_s, ' ');
// Skip day of week to make message shorter
if ((p != NULL) && (*(++p) != '\0')) {
result = pcmk__str_copy(p);
}
free(when_s);
}
if (result == NULL) {
result = pcmk__str_copy("unknown_time");
}
return result;
}
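/* For illustration (the exact format is an assumption about
* pcmk__epoch2str() output): a timestamp rendered as
* "Wed Dec 4 09:01:10 2024" is shortened to "Dec 4 09:01:10 2024" by
* skipping everything through the first space (the day of week).
*/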
/*!
* \internal
* \brief Ban a resource (or its clone if an anonymous instance) from all nodes
*
* \param[in,out] rsc Resource to ban
*/
static void
ban_from_all_nodes(pcmk_resource_t *rsc)
{
int score = -PCMK_SCORE_INFINITY;
const pcmk_scheduler_t *scheduler = rsc->priv->scheduler;
if (rsc->priv->parent != NULL) {
pcmk_resource_t *parent = uber_parent(rsc);
if (pcmk__is_anonymous_clone(parent)) {
/* For anonymous clones, if an operation with
* PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the
* entire clone must stop.
*/
rsc = parent;
}
}
// Ban the resource from all nodes
crm_notice("%s will not be started under current conditions", rsc->id);
if (rsc->priv->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->priv->allowed_nodes);
}
rsc->priv->allowed_nodes = pe__node_list2table(scheduler->nodes);
g_hash_table_foreach(rsc->priv->allowed_nodes, set_node_score, &score);
}
/*!
* \internal
* \brief Get configured failure handling and role after failure for an action
*
* \param[in,out] history Unpacked action history entry
* \param[out] on_fail Where to set configured failure handling
* \param[out] fail_role Where to set role after failure
*/
static void
unpack_failure_handling(struct action_history *history,
enum pcmk__on_fail *on_fail,
enum rsc_role_e *fail_role)
{
xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
history->interval_ms, true);
GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
history->task,
history->interval_ms, config);
const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
*on_fail = pcmk__parse_on_fail(history->rsc, history->task,
history->interval_ms, on_fail_str);
*fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
meta);
g_hash_table_destroy(meta);
}
/*!
* \internal
* \brief Update resource role, failure handling, etc., after a failed action
*
* \param[in,out] history Parsed action result history
* \param[in] config_on_fail Action failure handling from configuration
* \param[in] fail_role Resource's role after failure of this action
* \param[out] last_failure Where to store this history entry's XML as the
* resource's last failure
* \param[in,out] on_fail Actual handling of action result
*/
static void
unpack_rsc_op_failure(struct action_history *history,
enum pcmk__on_fail config_on_fail,
enum rsc_role_e fail_role, xmlNode **last_failure,
enum pcmk__on_fail *on_fail)
{
bool is_probe = false;
char *last_change_s = NULL;
pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler;
*last_failure = history->xml;
is_probe = pcmk_xe_is_probe(history->xml);
last_change_s = last_change_str(history->xml);
if (!pcmk_is_set(scheduler->flags, pcmk__sched_symmetric_cluster)
&& (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " QB_XS " exit-status=%d id=%s",
crm_exit_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
} else {
pcmk__sched_warn(scheduler,
"Unexpected result (%s%s%s) was recorded for %s of "
"%s on %s at %s " QB_XS " exit-status=%d id=%s",
crm_exit_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
if (is_probe && (history->exit_status != PCMK_OCF_OK)
&& (history->exit_status != PCMK_OCF_NOT_RUNNING)
&& (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) {
/* A failed (not just unexpected) probe result could mean the user
* didn't know resources will be probed even where they can't run.
*/
crm_notice("If it is not possible for %s to run on %s, see "
"the " PCMK_XA_RESOURCE_DISCOVERY " option for location "
"constraints",
history->rsc->id, pcmk__node_name(history->node));
}
record_failed_op(history);
}
free(last_change_s);
if (*on_fail < config_on_fail) {
pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s",
pcmk__on_fail_text(*on_fail),
pcmk__on_fail_text(config_on_fail), history->key);
*on_fail = config_on_fail;
}
if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY,
"__stop_fail__", scheduler);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
unpack_migrate_to_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
unpack_migrate_from_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->priv->orig_role = pcmk_role_promoted;
} else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
if (config_on_fail == pcmk__on_fail_block) {
history->rsc->priv->orig_role = pcmk_role_promoted;
pe__set_next_role(history->rsc, pcmk_role_stopped,
"demote with " PCMK_META_ON_FAIL "=block");
} else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
history->rsc->priv->orig_role = pcmk_role_stopped;
} else {
/* Staying in the promoted role would put the scheduler and
* controller into a loop. Setting the role to unpromoted is not
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
history->rsc->priv->orig_role = pcmk_role_unpromoted;
}
}
if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
/* leave stopped */
pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
history->rsc->priv->orig_role = pcmk_role_stopped;
} else if (history->rsc->priv->orig_role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
set_active(history->rsc);
}
pcmk__rsc_trace(history->rsc,
"Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s",
history->rsc->id,
pcmk_role_text(history->rsc->priv->orig_role),
pcmk__btoa(history->node->details->unclean),
pcmk__on_fail_text(config_on_fail),
pcmk_role_text(fail_role));
if ((fail_role != pcmk_role_started)
&& (history->rsc->priv->next_role < fail_role)) {
pe__set_next_role(history->rsc, fail_role, "failure");
}
if (fail_role == pcmk_role_stopped) {
ban_from_all_nodes(history->rsc);
}
}
/*!
* \internal
* \brief Block a resource with a failed action if it cannot be recovered
*
* If resource action is a failed stop and fencing is not possible, mark the
* resource as unmanaged and blocked, since recovery cannot be done.
*
* \param[in,out] history Parsed action history entry
*/
static void
block_if_unrecoverable(struct action_history *history)
{
char *last_change_s = NULL;
if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(history->node->priv->scheduler, history->node)) {
return; // Failed stops are recoverable via fencing
}
last_change_s = last_change_str(history->xml);
pcmk__sched_err(history->node->priv->scheduler,
"No further recovery can be attempted for %s "
"because %s on %s failed (%s%s%s) at %s "
QB_XS " rc=%d id=%s",
history->rsc->id, history->task,
pcmk__node_name(history->node),
crm_exit_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
last_change_s, history->exit_status, history->id);
free(last_change_s);
pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_managed);
pcmk__set_rsc_flags(history->rsc, pcmk__rsc_blocked);
}
/*!
* \internal
* \brief Update action history's execution status and why
*
* \param[in,out] history Parsed action history entry
* \param[out] why Where to store reason for update
* \param[in] value New value
* \param[in] reason Description of why value was changed
*/
static inline void
remap_because(struct action_history *history, const char **why, int value,
const char *reason)
{
if (history->execution_status != value) {
history->execution_status = value;
*why = reason;
}
}
/*!
* \internal
* \brief Remap informational monitor results and operation status
*
* For monitor results, certain OCF codes provide extended information about
* services that are not failed but not entirely healthy either. Pacemaker
* must treat these as the "normal" (expected) result.
*
* For operation status, the action result can be used to determine a status
* appropriate for responding to the action. The status reported by the
* executor is not directly usable, because the executor does not know what
* result was expected.
*
* \param[in,out] history Parsed action history entry
* \param[in,out] on_fail What should be done about the result
* \param[in] expired Whether result is expired
*
* \note If the result is remapped and the node is not shutting down or failed,
* the operation will be recorded in the scheduler data's list of failed
* operations to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
static void
remap_operation(struct action_history *history,
enum pcmk__on_fail *on_fail, bool expired)
{
bool is_probe = false;
int orig_exit_status = history->exit_status;
int orig_exec_status = history->execution_status;
const char *why = NULL;
const char *task = history->task;
// Remap degraded results to their successful counterparts
history->exit_status = pcmk__effective_rc(history->exit_status);
if (history->exit_status != orig_exit_status) {
why = "degraded result";
if (!expired && (!history->node->details->shutdown
|| history->node->details->online)) {
record_failed_op(history);
}
}
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& ((history->execution_status != PCMK_EXEC_DONE)
|| (history->exit_status != PCMK_OCF_NOT_RUNNING))) {
history->execution_status = PCMK_EXEC_DONE;
history->exit_status = PCMK_OCF_NOT_RUNNING;
why = "equivalent probe result";
}
/* If the executor reported an execution status of anything but done or
* error, consider that final. But for done or error, we know better whether
* it should be treated as a failure or not, because we know the expected
* result.
*/
switch (history->execution_status) {
case PCMK_EXEC_DONE:
case PCMK_EXEC_ERROR:
break;
// These should be treated as node-fatal
case PCMK_EXEC_NO_FENCE_DEVICE:
case PCMK_EXEC_NO_SECRETS:
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"node-fatal error");
goto remap_done;
default:
goto remap_done;
}
is_probe = pcmk_xe_is_probe(history->xml);
if (is_probe) {
task = "probe";
}
if (history->expected_exit_status < 0) {
/* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
* Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
* expected exit status in the transition key, which (along with the
* similar case of a corrupted transition key in the CIB) will be
* reported to this function as -1. Pacemaker 2.0+ does not support
* rolling upgrades from those versions or processing of saved CIB files
* from those versions, so we do not need to care much about this case.
*/
remap_because(history, &why, PCMK_EXEC_ERROR,
"obsolete history format");
pcmk__config_warn("Expected result not found for %s on %s "
"(corrupt or obsolete CIB?)",
history->key, pcmk__node_name(history->node));
} else if (history->exit_status == history->expected_exit_status) {
remap_because(history, &why, PCMK_EXEC_DONE, "expected result");
} else {
remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result");
pcmk__rsc_debug(history->rsc,
"%s on %s: expected %d (%s), got %d (%s%s%s)",
history->key, pcmk__node_name(history->node),
history->expected_exit_status,
crm_exit_str(history->expected_exit_status),
history->exit_status,
crm_exit_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""));
}
switch (history->exit_status) {
case PCMK_OCF_OK:
if (is_probe
&& (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active on %s at %s",
history->rsc->id, pcmk__node_name(history->node),
last_change_s);
free(last_change_s);
}
break;
case PCMK_OCF_NOT_RUNNING:
if (is_probe
|| (history->expected_exit_status == history->exit_status)
|| !pcmk_is_set(history->rsc->flags, pcmk__rsc_managed)) {
/* For probes, recurring monitors for the Stopped role, and
* unmanaged resources, "not running" is not considered a
* failure.
*/
remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
history->rsc->priv->orig_role = pcmk_role_stopped;
*on_fail = pcmk__on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"not running");
}
break;
case PCMK_OCF_RUNNING_PROMOTED:
if (is_probe
&& (history->exit_status != history->expected_exit_status)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active and promoted on %s at %s",
history->rsc->id,
pcmk__node_name(history->node), last_change_s);
free(last_change_s);
}
if (!expired
|| (history->exit_status == history->expected_exit_status)) {
history->rsc->priv->orig_role = pcmk_role_promoted;
}
break;
case PCMK_OCF_FAILED_PROMOTED:
if (!expired) {
history->rsc->priv->orig_role = pcmk_role_promoted;
}
remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
break;
case PCMK_OCF_NOT_CONFIGURED:
remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status");
break;
case PCMK_OCF_UNIMPLEMENT_FEATURE:
{
guint interval_ms = 0;
crm_element_value_ms(history->xml, PCMK_META_INTERVAL,
&interval_ms);
if (interval_ms == 0) {
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"exit status");
} else {
remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED,
"exit status");
}
}
break;
case PCMK_OCF_NOT_INSTALLED:
case PCMK_OCF_INVALID_PARAM:
case PCMK_OCF_INSUFFICIENT_PRIV:
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status");
break;
default:
if (history->execution_status == PCMK_EXEC_DONE) {
char *last_change_s = last_change_str(history->xml);
crm_info("Treating unknown exit status %d from %s of %s "
"on %s at %s as failure",
history->exit_status, task, history->rsc->id,
pcmk__node_name(history->node), last_change_s);
remap_because(history, &why, PCMK_EXEC_ERROR,
"unknown exit status");
free(last_change_s);
}
break;
}
remap_done:
if (why != NULL) {
pcmk__rsc_trace(history->rsc,
"Remapped %s result from [%s: %s] to [%s: %s] "
"because of %s",
history->key, pcmk_exec_status_str(orig_exec_status),
crm_exit_str(orig_exit_status),
pcmk_exec_status_str(history->execution_status),
crm_exit_str(history->exit_status), why);
}
}
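/* A sketch of the degraded-result remapping above (mapping stated here as an
* assumption about pcmk__effective_rc()):
*
* PCMK_OCF_DEGRADED -> PCMK_OCF_OK
* PCMK_OCF_DEGRADED_PROMOTED -> PCMK_OCF_RUNNING_PROMOTED
*
* so a degraded monitor result is treated as a success while still being
* recorded in the failed-operations list for visibility.
*/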
// Return TRUE if this is a last failure of a start or monitor whose parameters changed
static bool
should_clear_for_param_change(const xmlNode *xml_op, const char *task,
pcmk_resource_t *rsc, pcmk_node_t *node)
{
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
- pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
- rsc->priv->scheduler);
+ pcmk__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
+ rsc->priv->scheduler);
} else {
pcmk__op_digest_t *digest_data = NULL;
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->priv->scheduler);
switch (digest_data->rc) {
case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, pcmk__xe_history_key(xml_op),
node->priv->id);
break;
case pcmk__digest_match:
break;
default:
return TRUE;
}
}
}
return FALSE;
}
// Order action after fencing of remote node, given connection rsc
static void
order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
pcmk_scheduler_t *scheduler)
{
pcmk_node_t *remote_node = pcmk_find_node(scheduler, remote_conn->id);
if (remote_node) {
pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
FALSE, scheduler);
order_actions(fence, action, pcmk__ar_first_implies_then);
}
}
static bool
should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
guint interval_ms, bool is_last_failure)
{
/* Clearing failures of recurring monitors has special concerns. The
* executor reports only changes in the monitor result, so if the
* monitor is still active and still getting the same failure result,
* that will go undetected after the failure is cleared.
*
* Also, the operation history will have the time when the recurring
* monitor result changed to the given code, not the time when the
* result last happened.
*
* @TODO We probably should clear such failures only when the failure
* timeout has passed since the last occurrence of the failed result.
* However we don't record that information. We could maybe approximate
* that by clearing only if there is a more recent successful monitor or
* stop result, but we don't even have that information at this point
* since we are still unpacking the resource's operation history.
*
* This is especially important for remote connection resources with a
* reconnect interval, so in that case, we skip clearing failures
* if the remote node hasn't been fenced.
*/
if ((rsc->priv->remote_reconnect_ms > 0U)
&& pcmk_is_set(rsc->priv->scheduler->flags,
pcmk__sched_fencing_enabled)
&& (interval_ms != 0)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pcmk_node_t *remote_node = pcmk_find_node(rsc->priv->scheduler,
rsc->id);
if (remote_node && !pcmk_is_set(remote_node->priv->flags,
pcmk__node_remote_fenced)) {
if (is_last_failure) {
crm_info("Waiting to clear monitor failure for remote node %s"
" until fencing has occurred", rsc->id);
}
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Check operation age and schedule failure clearing when appropriate
*
* This function has two distinct purposes. The first is to check whether an
* operation history entry is expired (i.e. the resource has a failure timeout,
* the entry is older than the timeout, and the resource either has no fail
* count or its fail count is entirely older than the timeout). The second is to
* schedule fail count clearing when appropriate (i.e. the operation is expired
* and either the resource has an expired fail count or the operation is a
* last_failure for a remote connection resource with a reconnect interval,
* or the operation is a last_failure for a start or monitor operation and the
* resource's parameters have changed since the operation).
*
* \param[in,out] history Parsed action result history
*
* \return true if operation history entry is expired, otherwise false
*/
static bool
check_operation_expiry(struct action_history *history)
{
bool expired = false;
bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0");
time_t last_run = 0;
int unexpired_fail_count = 0;
const char *clear_reason = NULL;
const guint expiration_sec =
pcmk__timeout_ms2s(history->rsc->priv->failure_expiration_ms);
pcmk_scheduler_t *scheduler = history->rsc->priv->scheduler;
if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) {
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not expired: "
"Not Installed does not expire",
history->id, pcmk__node_name(history->node));
return false; // "Not installed" must always be cleared manually
}
if ((expiration_sec > 0)
&& (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE,
&last_run) == 0)) {
/* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a
* timestamp
*/
time_t now = pcmk__scheduler_epoch_time(scheduler);
time_t last_failure = 0;
// Is this particular operation history older than the failure timeout?
if ((now >= (last_run + expiration_sec))
&& !should_ignore_failure_timeout(history->rsc, history->task,
history->interval_ms,
is_last_failure)) {
expired = true;
}
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
&last_failure,
pcmk__fc_effective,
history->xml);
// Update scheduler recheck time according to *last* failure
crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d "
"expiration=%s last-failure@%lld",
history->id, (long long) last_run, (expired? "" : "not "),
(long long) now, unexpired_fail_count,
pcmk__readable_interval(expiration_sec * 1000),
(long long) last_failure);
last_failure += expiration_sec + 1;
if (unexpired_fail_count && (now < last_failure)) {
pe__update_recheck_time(last_failure, scheduler,
"fail count expiration");
}
}
if (expired) {
if (pe_get_failcount(history->node, history->rsc, NULL,
pcmk__fc_default, history->xml)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
// There is no fail count considering timeout
clear_reason = "it expired";
} else {
/* This operation is old, but there is an unexpired fail count.
* In a properly functioning cluster, this should only be
* possible if this operation is not a failure (otherwise the
* fail count should be expired too), so this is really just a
* failsafe.
*/
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Unexpired fail count",
history->id, pcmk__node_name(history->node));
expired = false;
}
} else if (is_last_failure
&& (history->rsc->priv->remote_reconnect_ms > 0U)) {
/* Clear any expired last failure when reconnect interval is set,
* even if there is no fail count.
*/
clear_reason = "reconnect interval is set";
}
}
if (!expired && is_last_failure
&& should_clear_for_param_change(history->xml, history->task,
history->rsc, history->node)) {
clear_reason = "resource parameters have changed";
}
if (clear_reason != NULL) {
pcmk_action_t *clear_op = NULL;
// Schedule clearing of the fail count
clear_op = pe__clear_failcount(history->rsc, history->node,
clear_reason, scheduler);
if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)
&& (history->rsc->priv->remote_reconnect_ms > 0)) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
* completes.
*
* We could limit this to remote_node->details->unclean, but at
* this point, that's always true (it won't be reliable until
* after unpack_node_history() is done).
*/
crm_info("Clearing %s failure will wait until any scheduled "
"fencing of %s completes",
history->task, history->rsc->id);
order_after_remote_fencing(clear_op, history->rsc, scheduler);
}
}
if (expired && (history->interval_ms == 0)
&& pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
switch (history->exit_status) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
case PCMK_OCF_RUNNING_PROMOTED:
case PCMK_OCF_DEGRADED:
case PCMK_OCF_DEGRADED_PROMOTED:
// Don't expire probes that return these values
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Probe result",
history->id, pcmk__node_name(history->node));
expired = false;
break;
}
}
return expired;
}
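/* Worked example (illustrative): with PCMK_META_FAILURE_TIMEOUT=10min
* (expiration_sec=600), a monitor failure whose PCMK_XA_LAST_RC_CHANGE is
* 12:00:00 is considered expired by any scheduler run at or after 12:10:00.
* If the resource's fail count (ignoring the timeout) is nonzero but entirely
* older than the timeout, a clear-failcount action is then scheduled with
* reason "it expired".
*/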
int
pe__target_rc_from_xml(const xmlNode *xml_op)
{
int target_rc = 0;
const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY);
if (key == NULL) {
return -1;
}
decode_transition_key(key, NULL, NULL, NULL, &target_rc);
return target_rc;
}
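/* Illustrative sketch: transition keys have the form
* "<action-id>:<transition-id>:<target-rc>:<transition-uuid>" (an assumption
* about the key layout), so for a key like "3:17:0:0e1a...", this returns 0,
* meaning PCMK_OCF_OK was the expected result.
*/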
/*!
* \internal
* \brief Update a resource's state for an action result
*
* \param[in,out] history Parsed action history entry
* \param[in] exit_status Exit status to base new state on
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
update_resource_state(struct action_history *history, int exit_status,
const xmlNode *last_failure,
enum pcmk__on_fail *on_fail)
{
bool clear_past_failure = false;
if ((exit_status == PCMK_OCF_NOT_INSTALLED)
|| (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml))) {
history->rsc->priv->orig_role = pcmk_role_stopped;
} else if (exit_status == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
if ((last_failure != NULL)
&& pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure),
pcmk__str_none)) {
clear_past_failure = true;
}
if (history->rsc->priv->orig_role < pcmk_role_started) {
set_active(history->rsc);
}
} else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
history->rsc->priv->orig_role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
history->rsc->priv->orig_role = pcmk_role_stopped;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
history->rsc->priv->orig_role = pcmk_role_promoted;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
pcmk__str_none)) {
if (*on_fail == pcmk__on_fail_demote) {
/* Demote clears an error only if
* PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE
*/
clear_past_failure = true;
}
history->rsc->priv->orig_role = pcmk_role_unpromoted;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
history->rsc->priv->orig_role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
unpack_migrate_to_success(history);
} else if (history->rsc->priv->orig_role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "%s active on %s",
history->rsc->id, pcmk__node_name(history->node));
set_active(history->rsc);
}
if (!clear_past_failure) {
return;
}
switch (*on_fail) {
case pcmk__on_fail_stop:
case pcmk__on_fail_ban:
case pcmk__on_fail_standby_node:
case pcmk__on_fail_fence_node:
pcmk__rsc_trace(history->rsc,
"%s (%s) is not cleared by a completed %s",
history->rsc->id, pcmk__on_fail_text(*on_fail),
history->task);
break;
case pcmk__on_fail_block:
case pcmk__on_fail_ignore:
case pcmk__on_fail_demote:
case pcmk__on_fail_restart:
case pcmk__on_fail_restart_container:
*on_fail = pcmk__on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures");
break;
case pcmk__on_fail_reset_remote:
if (history->rsc->priv->remote_reconnect_ms == 0U) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
* completely stopped. (With a reconnect interval, we wait
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
*on_fail = pcmk__on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures and reset remote");
}
break;
}
}
/*!
* \internal
* \brief Check whether a given history entry matters for resource state
*
* \param[in] history Parsed action history entry
*
* \return true if action can affect resource state, otherwise false
*/
static inline bool
can_affect_state(struct action_history *history)
{
return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
"asyncmon", NULL);
}
/*!
* \internal
* \brief Unpack execution/exit status and exit reason from a history entry
*
* \param[in,out] history Action history entry to unpack
*
* \return Standard Pacemaker return code
*/
static int
unpack_action_result(struct action_history *history)
{
if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS,
&(history->execution_status)) < 0)
|| (history->execution_status < PCMK_EXEC_PENDING)
|| (history->execution_status > PCMK_EXEC_MAX)
|| (history->execution_status == PCMK_EXEC_CANCELLED)) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_OP_STATUS " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_OP_STATUS),
""));
return pcmk_rc_unpack_error;
}
if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE,
&(history->exit_status)) < 0)
|| (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_RC_CODE " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_RC_CODE),
""));
return pcmk_rc_unpack_error;
}
history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Process an action history entry whose result expired
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
*
* \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the
* entry needs no further processing)
*/
static int
process_expired_result(struct action_history *history, int orig_exit_status)
{
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& (orig_exit_status != history->expected_exit_status)) {
if (history->rsc->priv->orig_role <= pcmk_role_stopped) {
history->rsc->priv->orig_role = pcmk_role_unknown;
}
crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
"Masked failure expired",
history->id, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->exit_status == history->expected_exit_status) {
return pcmk_rc_undetermined; // Only failures expire
}
if (history->interval_ms == 0) {
crm_notice("Ignoring resource history entry %s for %s of %s on %s: "
"Expired failure",
history->id, history->task, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->node->details->online && !history->node->details->unclean) {
/* Reschedule the recurring action. schedule_cancel() won't work at
* this stage, so as a hacky workaround, forcibly change the restart
* digest so pcmk__check_action_config() does what we want later.
*
* @TODO We should skip this if there is a newer successful monitor.
* Also, this causes rescheduling only if the history entry
* has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure
* scheduler regression test doesn't, but that may not be a
* realistic scenario in production).
*/
crm_notice("Rescheduling %s-interval %s of %s on %s "
"after failure expired",
pcmk__readable_interval(history->interval_ms), history->task,
history->rsc->id, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST,
"calculated-failure-timeout");
return pcmk_rc_ok;
}
return pcmk_rc_undetermined;
}
/*!
* \internal
* \brief Process a masked probe failure
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
mask_probe_failure(struct action_history *history, int orig_exit_status,
const xmlNode *last_failure,
enum pcmk__on_fail *on_fail)
{
pcmk_resource_t *ban_rsc = history->rsc;
if (!pcmk_is_set(history->rsc->flags, pcmk__rsc_unique)) {
ban_rsc = uber_parent(history->rsc);
}
crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
crm_exit_str(orig_exit_status), history->rsc->id,
pcmk__node_name(history->node));
update_resource_state(history, history->expected_exit_status, last_failure,
on_fail);
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->priv->name);
record_failed_op(history);
resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY,
"masked-probe-failure", ban_rsc->priv->scheduler);
}
/*!
* \internal
* \brief Check whether a given failure is for a given pending action
*
* \param[in] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*
* \return true if \p last_failure is the failure of the pending action in
* \p history, otherwise false
* \note Both \p history and \p last_failure must come from the same
* \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be
* the same.
*/
static bool
failure_is_newer(const struct action_history *history,
const xmlNode *last_failure)
{
guint failure_interval_ms = 0U;
long long failure_change = 0LL;
long long this_change = 0LL;
if (last_failure == NULL) {
return false; // Resource has no last_failure entry
}
if (!pcmk__str_eq(history->task,
crm_element_value(last_failure, PCMK_XA_OPERATION),
pcmk__str_none)) {
return false; // last_failure is for different action
}
if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL,
&failure_interval_ms) != pcmk_ok)
|| (history->interval_ms != failure_interval_ms)) {
return false; // last_failure is for action with different interval
}
if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE),
&this_change, 0LL) != pcmk_rc_ok)
|| (pcmk__scan_ll(crm_element_value(last_failure,
PCMK_XA_LAST_RC_CHANGE),
&failure_change, 0LL) != pcmk_rc_ok)
|| (failure_change < this_change)) {
return false; // Failure is not known to be newer
}
return true;
}
/*!
* \internal
* \brief Update a resource's role etc. for a pending action
*
* \param[in,out] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*/
static void
process_pending_action(struct action_history *history,
const xmlNode *last_failure)
{
/* For recurring monitors, a failure is recorded only in RSC_last_failure_0,
* and there might be a RSC_monitor_INTERVAL entry with the last successful
* or pending result.
*
* If last_failure contains the failure of the pending recurring monitor
* we're processing here, and is newer, the action is no longer pending.
* (Pending results have call ID -1, which sorts last, so the last failure
* if any should be known.)
*/
if (failure_is_newer(history, last_failure)) {
return;
}
if (strcmp(history->task, PCMK_ACTION_START) == 0) {
pcmk__set_rsc_flags(history->rsc, pcmk__rsc_start_pending);
set_active(history->rsc);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->priv->orig_role = pcmk_role_promoted;
} else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
&& history->node->details->unclean) {
/* A migrate_to action is pending on an unclean source, so force a stop
* on the target.
*/
const char *migrate_target = NULL;
pcmk_node_t *target = NULL;
migrate_target = crm_element_value(history->xml,
PCMK__META_MIGRATE_TARGET);
target = pcmk_find_node(history->rsc->priv->scheduler,
migrate_target);
if (target != NULL) {
stop_action(history->rsc, target, FALSE);
}
}
if (history->rsc->priv->pending_action != NULL) {
/* There should never be multiple pending actions, but as a failsafe,
* just remember the first one processed for display purposes.
*/
return;
}
if (pcmk_is_probe(history->task, history->interval_ms)) {
/* Pending probes are currently never displayed, even if pending
* operations are requested. If we ever want to change that,
* enable the below and the corresponding part of
* native.c:native_pending_action().
*/
#if 0
history->rsc->priv->pending_action = strdup("probe");
history->rsc->priv->pending_node = history->node;
#endif
} else {
history->rsc->priv->pending_action = strdup(history->task);
history->rsc->priv->pending_node = history->node;
}
}
static void
unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum pcmk__on_fail *on_fail)
{
int old_rc = 0;
bool expired = false;
pcmk_resource_t *parent = rsc;
enum rsc_role_e fail_role = pcmk_role_unknown;
enum pcmk__on_fail failure_strategy = pcmk__on_fail_restart;
struct action_history history = {
.rsc = rsc,
.node = node,
.xml = xml_op,
.execution_status = PCMK_EXEC_UNKNOWN,
};
CRM_CHECK(rsc && node && xml_op, return);
history.id = pcmk__xe_id(xml_op);
if (history.id == NULL) {
pcmk__config_err("Ignoring resource history entry for %s on %s "
"without ID", rsc->id, pcmk__node_name(node));
return;
}
// Task and interval
history.task = crm_element_value(xml_op, PCMK_XA_OPERATION);
if (history.task == NULL) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"without " PCMK_XA_OPERATION,
history.id, rsc->id, pcmk__node_name(node));
return;
}
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms));
if (!can_affect_state(&history)) {
pcmk__rsc_trace(rsc,
"Ignoring resource history entry %s for %s on %s "
"with irrelevant action '%s'",
history.id, rsc->id, pcmk__node_name(node),
history.task);
return;
}
if (unpack_action_result(&history) != pcmk_rc_ok) {
return; // Error already logged
}
history.expected_exit_status = pe__target_rc_from_xml(xml_op);
history.key = pcmk__xe_history_key(xml_op);
crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id));
pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
history.id, history.task, history.call_id,
pcmk__node_name(node),
pcmk_exec_status_str(history.execution_status),
crm_exit_str(history.exit_status));
if (node->details->unclean) {
pcmk__rsc_trace(rsc,
"%s is running on %s, which is unclean (further action "
"depends on value of stop's on-fail attribute)",
rsc->id, pcmk__node_name(node));
}
expired = check_operation_expiry(&history);
old_rc = history.exit_status;
remap_operation(&history, on_fail, expired);
if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) {
goto done;
}
if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
mask_probe_failure(&history, old_rc, *last_failure, on_fail);
goto done;
}
if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
parent = uber_parent(rsc);
}
switch (history.execution_status) {
case PCMK_EXEC_PENDING:
process_pending_action(&history, *last_failure);
goto done;
case PCMK_EXEC_DONE:
update_resource_state(&history, history.exit_status, *last_failure,
on_fail);
goto done;
case PCMK_EXEC_NOT_INSTALLED:
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if (failure_strategy == pcmk__on_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
QB_XS " status=%d rc=%d id=%s",
history.task, rsc->id, pcmk__node_name(node),
history.execution_status, history.exit_status,
history.id);
/* Also for printing it as "FAILED" by marking it as
* pcmk__rsc_failed later
*/
*on_fail = pcmk__on_fail_ban;
}
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->priv->scheduler);
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pcmk__is_pacemaker_remote_node(node)
&& pcmk_is_set(node->priv->remote->flags,
pcmk__rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
* fail-safe in case a bug or unusual circumstances do lead to
* that, ensure the remote connection is considered failed.
*/
pcmk__set_rsc_flags(node->priv->remote,
pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
}
break; // Not done, do error handling
case PCMK_EXEC_ERROR:
case PCMK_EXEC_ERROR_HARD:
case PCMK_EXEC_ERROR_FATAL:
case PCMK_EXEC_TIMEOUT:
case PCMK_EXEC_NOT_SUPPORTED:
case PCMK_EXEC_INVALID:
break; // Not done, do error handling
default: // No other value should be possible at this point
break;
}
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if ((failure_strategy == pcmk__on_fail_ignore)
|| ((failure_strategy == pcmk__on_fail_restart_container)
&& (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
char *last_change_s = last_change_str(xml_op);
crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded "
QB_XS " %s",
history.task, crm_exit_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), rsc->id,
pcmk__node_name(node), last_change_s, history.id);
free(last_change_s);
update_resource_state(&history, history.expected_exit_status,
*last_failure, on_fail);
crm_xml_add(xml_op, PCMK_XA_UNAME, node->priv->name);
pcmk__set_rsc_flags(rsc, pcmk__rsc_ignore_failure);
record_failed_op(&history);
if ((failure_strategy == pcmk__on_fail_restart_container)
&& (*on_fail <= pcmk__on_fail_restart)) {
*on_fail = failure_strategy;
}
} else {
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
uint8_t log_level = LOG_ERR;
if (history.exit_status == PCMK_OCF_NOT_INSTALLED) {
log_level = LOG_NOTICE;
}
do_crm_log(log_level,
"Preventing %s from restarting on %s because "
"of hard failure (%s%s%s) " QB_XS " %s",
parent->id, pcmk__node_name(node),
crm_exit_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->priv->scheduler);
} else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) {
pcmk__sched_err(rsc->priv->scheduler,
"Preventing %s from restarting anywhere because "
"of fatal failure (%s%s%s) " QB_XS " %s",
parent->id, crm_exit_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, NULL, -PCMK_SCORE_INFINITY,
"fatal-error", rsc->priv->scheduler);
}
}
done:
pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)",
rsc->id, pcmk__node_name(node), history.id,
pcmk_role_text(rsc->priv->orig_role),
pcmk_role_text(rsc->priv->next_role));
}
/*!
 * \internal
 * \brief Insert a node attribute with value into a \c GHashTable
 *
 * This has the \c GHRFunc signature, so it can be used with
 * \c g_hash_table_foreach_steal().
 *
 * \param[in,out] key        Key to insert (either freed or owned by
 *                           \p user_data upon return)
 * \param[in]     value      Value to insert (owned by \p user_data upon return)
 * \param[in]     user_data  \c GHashTable to insert into
 *
 * \return \c TRUE (so that \c g_hash_table_foreach_steal() removes the entry
 *         from the source table)
 */
static gboolean
insert_attr(gpointer key, gpointer value, gpointer user_data)
{
GHashTable *table = user_data;
g_hash_table_insert(table, key, value);
return TRUE;
}
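/*!
 * \internal
 * \brief Add a node's built-in and configured attributes to its tables
 *
 * Insert the node's name, ID, DC status, and (if set) cluster and site names
 * into its attribute table, then unpack any instance attributes and
 * utilization values from the given XML.
 *
 * \param[in]     xml_obj    XML element containing the node's instance
 *                           attributes and utilization
 * \param[in,out] node       Node to add attributes to
 * \param[in]     overwrite  Whether unpacked attributes may overwrite
 *                           existing values in the node's attribute table
 * \param[in,out] scheduler  Scheduler data
 */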
static void
add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
pcmk_scheduler_t *scheduler)
{
const char *cluster_name = NULL;
const char *dc_id = crm_element_value(scheduler->input, PCMK_XA_DC_UUID);
const pcmk_rule_input_t rule_input = {
.now = scheduler->priv->now,
};
pcmk__insert_dup(node->priv->attrs,
CRM_ATTR_UNAME, node->priv->name);
pcmk__insert_dup(node->priv->attrs, CRM_ATTR_ID, node->priv->id);
if ((scheduler->dc_node == NULL)
&& pcmk__str_eq(node->priv->id, dc_id, pcmk__str_casei)) {
scheduler->dc_node = node;
pcmk__insert_dup(node->priv->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_TRUE);
} else if (!pcmk__same_node(node, scheduler->dc_node)) {
pcmk__insert_dup(node->priv->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_FALSE);
}
cluster_name = g_hash_table_lookup(scheduler->priv->options,
PCMK_OPT_CLUSTER_NAME);
if (cluster_name) {
pcmk__insert_dup(node->priv->attrs, CRM_ATTR_CLUSTER_NAME,
cluster_name);
}
if (overwrite) {
/* @TODO Try to reorder some unpacking so that we don't need the
* overwrite argument or to unpack into a temporary table
*/
GHashTable *unpacked = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES,
&rule_input, unpacked, NULL, scheduler);
g_hash_table_foreach_steal(unpacked, insert_attr, node->priv->attrs);
g_hash_table_destroy(unpacked);
} else {
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES,
&rule_input, node->priv->attrs, NULL,
scheduler);
}
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_input,
node->priv->utilization, NULL, scheduler);
if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL,
pcmk__rsc_node_current) == NULL) {
const char *site_name = pcmk__node_attr(node, "site-name", NULL,
pcmk__rsc_node_current);
if (site_name) {
pcmk__insert_dup(node->priv->attrs,
CRM_ATTR_SITE_NAME, site_name);
} else if (cluster_name) {
/* Default to cluster-name if unset */
pcmk__insert_dup(node->priv->attrs,
CRM_ATTR_SITE_NAME, cluster_name);
}
}
}
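/*!
 * \internal
 * \brief Extract a resource's operation history entries from its XML
 *
 * \param[in]     node           Name of node that history entries are for
 * \param[in]     rsc            ID of resource that history entries are for
 * \param[in,out] rsc_entry      Resource's history XML (each history entry
 *                               gets the resource ID and node name added)
 * \param[in]     active_filter  If \c TRUE, include only entries for
 *                               currently active operations
 *
 * \return List of history XML elements, sorted by call ID
 */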
static GList *
extract_operations(const char *node, const char *rsc, xmlNode *rsc_entry,
                   gboolean active_filter)
{
int counter = -1;
int stop_index = -1;
int start_index = -1;
xmlNode *rsc_op = NULL;
GList *gIter = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
/* extract operations */
for (rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP, NULL,
NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) {
crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc);
crm_xml_add(rsc_op, PCMK_XA_UNAME, node);
op_list = g_list_prepend(op_list, rsc_op);
}
if (op_list == NULL) {
/* if there are no operations, there is nothing to do */
return NULL;
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
/* If we are not filtering out inactive operations, return everything */
if (active_filter == FALSE) {
return sorted_op_list;
}
op_list = NULL;
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
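/* Keep only entries for active operations: if start_index < stop_index,
 * the resource was stopped after it last started, so nothing is active;
 * otherwise, entries before start_index predate the last start and are
 * skipped as old
 */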
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
if (start_index < stop_index) {
crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry));
break;
} else if (counter < start_index) {
crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op));
continue;
}
op_list = g_list_append(op_list, rsc_op);
}
g_list_free(sorted_op_list);
return op_list;
}
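/*!
 * \brief Find operation history entries in scheduler input
 *
 * \param[in]     rsc            If not NULL, include only history for this
 *                               resource ID
 * \param[in]     node           If not NULL, include only history for this
 *                               node name
 * \param[in]     active_filter  If \c TRUE, include only entries for
 *                               currently active operations
 * \param[in,out] scheduler      Scheduler data
 *
 * \return List of XML history elements matching the given criteria
 */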
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
pcmk_scheduler_t *scheduler)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
xmlNode *status = pcmk__xe_first_child(scheduler->input, PCMK_XE_STATUS,
NULL, NULL);
pcmk_node_t *this_node = NULL;
xmlNode *node_state = NULL;
CRM_CHECK(status != NULL, return NULL);
for (node_state = pcmk__xe_first_child(status, PCMK__XE_NODE_STATE, NULL,
NULL);
node_state != NULL;
node_state = pcmk__xe_next(node_state, PCMK__XE_NODE_STATE)) {
const char *uname = crm_element_value(node_state, PCMK_XA_UNAME);
if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
continue;
}
this_node = pcmk_find_node(scheduler, uname);
if (this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
} else {
determine_online_status(node_state, this_node, scheduler);
}
if (this_node->details->online
|| pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
/* Offline nodes run no resources, unless fencing is enabled, in which
 * case we need to make sure resource start events happen after fencing
 */
xmlNode *lrm_rsc = NULL;
tmp = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL,
NULL);
tmp = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCES, NULL,
NULL);
for (lrm_rsc = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCE,
NULL, NULL);
lrm_rsc != NULL;
lrm_rsc = pcmk__xe_next(lrm_rsc, PCMK__XE_LRM_RESOURCE)) {
const char *rsc_id = crm_element_value(lrm_rsc, PCMK_XA_ID);
if ((rsc != NULL)
&& !pcmk__str_eq(rsc_id, rsc, pcmk__str_none)) {
continue;
}
intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
output = g_list_concat(output, intermediate);
}
}
}
return output;
}
