
diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
index b4919351d0..b183fd51a8 100644
--- a/include/crm/pengine/pe_types.h
+++ b/include/crm/pengine/pe_types.h
@@ -1,460 +1,458 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_PENGINE_PE_TYPES__H
# define PCMK__CRM_PENGINE_PE_TYPES__H
# include <stdbool.h> // bool
# include <sys/types.h> // time_t
# include <libxml/tree.h> // xmlNode
# include <glib.h> // gboolean, guint, GList, GHashTable
# include <crm/common/iso8601.h>
# include <crm/common/scheduler.h>
# include <crm/pengine/common.h>
#ifdef __cplusplus
extern "C" {
#endif
/*!
* \file
* \brief Data types for cluster status
* \ingroup pengine
*/
typedef struct pe_node_s pe_node_t;
typedef struct pe_action_s pe_action_t;
typedef struct pe_resource_s pe_resource_t;
typedef struct pe_working_set_s pe_working_set_t;
typedef struct resource_object_functions_s {
gboolean (*unpack) (pe_resource_t*, pe_working_set_t*);
pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search,
const pe_node_t *node, int flags);
/* parameter result must be free'd */
char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*,
pe_working_set_t*);
//! \deprecated will be removed in a future release
void (*print) (pe_resource_t*, const char*, long, void*);
gboolean (*active) (pe_resource_t*, gboolean);
enum rsc_role_e (*state) (const pe_resource_t*, gboolean);
pe_node_t *(*location) (const pe_resource_t*, GList**, int);
void (*free) (pe_resource_t*);
void (*count) (pe_resource_t*);
gboolean (*is_filtered) (const pe_resource_t*, GList *, gboolean);
/*!
* \brief Find a node (and optionally count all) where resource is active
*
* \param[in] rsc Resource to check
* \param[out] count_all If not NULL, set this to count of active nodes
* \param[out] count_clean If not NULL, set this to count of clean nodes
*
* \return A node where the resource is active, preferring the source node
* if the resource is involved in a partial migration or a clean,
* online node if the resource's "requires" is "quorum" or
* "nothing", or NULL if the resource is inactive.
*/
pe_node_t *(*active_node)(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean);
/*!
* \brief Get maximum resource instances per node
*
* \param[in] rsc Resource to check
*
* \return Maximum number of \p rsc instances that can be active on one node
*/
unsigned int (*max_per_node)(const pe_resource_t *rsc);
} resource_object_functions_t;
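/* Illustrative usage (a sketch, not part of the upstream header): callers
* invoke these methods through the resource's fns pointer. For example, to
* find a node where a resource is active and count the active nodes:
*
*     unsigned int count_all = 0;
*     unsigned int count_clean = 0;
*     pe_node_t *node = rsc->fns->active_node(rsc, &count_all, &count_clean);
*
* where rsc is a pe_resource_t pointer; node is NULL if the resource is
* inactive.
*/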
typedef struct resource_alloc_functions_s resource_alloc_functions_t;
struct pe_working_set_s {
xmlNode *input;
crm_time_t *now;
/* options extracted from the input */
char *dc_uuid;
pe_node_t *dc_node;
const char *stonith_action;
const char *placement_strategy;
unsigned long long flags;
int stonith_timeout;
enum pe_quorum_policy no_quorum_policy;
GHashTable *config_hash;
GHashTable *tickets;
// Actions for which there can be only one (e.g. fence nodeX)
GHashTable *singletons;
GList *nodes;
GList *resources;
GList *placement_constraints;
GList *ordering_constraints;
GList *colocation_constraints;
GList *ticket_constraints;
GList *actions;
xmlNode *failed;
xmlNode *op_defaults;
xmlNode *rsc_defaults;
/* stats */
int num_synapse;
int max_valid_nodes; //! Deprecated (will be removed in a future release)
int order_id;
int action_id;
/* final output */
xmlNode *graph;
GHashTable *template_rsc_sets;
const char *localhost;
GHashTable *tags;
int blocked_resources;
int disabled_resources;
GList *param_check; // History entries that need to be checked
GList *stop_needed; // Containers that need stop actions
time_t recheck_by; // Hint to controller to re-run scheduler by this time
int ninstances; // Total number of resource instances
guint shutdown_lock; // How long (seconds) to lock resources to shutdown node
int priority_fencing_delay; // Priority fencing delay
void *priv;
guint node_pending_timeout; // Node pending timeout
};
struct pe_node_shared_s {
const char *id;
const char *uname;
enum node_type type;
/* @TODO convert these flags into a bitfield */
gboolean online;
gboolean standby;
gboolean standby_onfail;
gboolean pending;
gboolean unclean;
gboolean unseen;
gboolean shutdown;
gboolean expected_up;
gboolean is_dc;
gboolean maintenance;
gboolean rsc_discovery_enabled;
gboolean remote_requires_reset;
gboolean remote_was_fenced;
gboolean remote_maintenance; /* what the remote-rsc is thinking */
gboolean unpacked;
int num_resources;
pe_resource_t *remote_rsc;
GList *running_rsc; /* pe_resource_t* */
GList *allocated_rsc; /* pe_resource_t* */
GHashTable *attrs; /* char* => char* */
GHashTable *utilization;
GHashTable *digest_cache; //!< cache of calculated resource digests
int priority; // calculated based on the priority of resources running on the node
pe_working_set_t *data_set; //!< Cluster that this node is part of
};
struct pe_node_s {
int weight;
gboolean fixed; //!< \deprecated Will be removed in a future release
int count;
struct pe_node_shared_s *details;
int rsc_discover_mode;
};
-# define pe_rsc_critical 0x00008000ULL
-
# define pe_rsc_failed 0x00010000ULL
# define pe_rsc_detect_loop 0x00020000ULL
# define pe_rsc_runnable 0x00040000ULL
# define pe_rsc_start_pending 0x00080000ULL
//!< \deprecated Do not use
# define pe_rsc_starting 0x00100000ULL
//!< \deprecated Do not use
# define pe_rsc_stopping 0x00200000ULL
# define pe_rsc_stop_unexpected 0x00400000ULL
# define pe_rsc_allow_migrate 0x00800000ULL
# define pe_rsc_failure_ignored 0x01000000ULL
# define pe_rsc_replica_container 0x02000000ULL
# define pe_rsc_maintenance 0x04000000ULL
# define pe_rsc_is_container 0x08000000ULL
# define pe_rsc_needs_quorum 0x10000000ULL
# define pe_rsc_needs_fencing 0x20000000ULL
# define pe_rsc_needs_unfencing 0x40000000ULL
/* *INDENT-OFF* */
enum pe_action_flags {
pe_action_pseudo = 0x00001,
pe_action_runnable = 0x00002,
pe_action_optional = 0x00004,
pe_action_print_always = 0x00008,
pe_action_have_node_attrs = 0x00010,
pe_action_implied_by_stonith = 0x00040,
pe_action_migrate_runnable = 0x00080,
pe_action_dumped = 0x00100,
pe_action_processed = 0x00200,
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
pe_action_clear = 0x00400, //! \deprecated Unused
#endif
pe_action_dangle = 0x00800,
/* This action requires one or more of its dependencies to be runnable.
* We use this to clear the runnable flag before checking dependencies.
*/
pe_action_requires_any = 0x01000,
pe_action_reschedule = 0x02000,
pe_action_tracking = 0x04000,
pe_action_dedup = 0x08000, //! Internal state tracking when creating graph
pe_action_dc = 0x10000, //! Action may run on DC instead of target
};
/* *INDENT-ON* */
struct pe_resource_s {
char *id;
char *clone_name;
xmlNode *xml;
xmlNode *orig_xml;
xmlNode *ops_xml;
pe_working_set_t *cluster;
pe_resource_t *parent;
enum pe_obj_types variant;
void *variant_opaque;
resource_object_functions_t *fns;
resource_alloc_functions_t *cmds;
enum rsc_recovery_type recovery_type;
enum pe_restart restart_type; //!< \deprecated will be removed in future release
int priority;
int stickiness;
int sort_index;
int failure_timeout;
int migration_threshold;
guint remote_reconnect_ms;
char *pending_task;
unsigned long long flags;
// @TODO merge these into flags
gboolean is_remote_node;
gboolean exclusive_discover;
/* Pay special attention to whether you want to use rsc_cons_lhs and
* rsc_cons directly, which include only colocations explicitly involving
* this resource, or call libpacemaker's pcmk__with_this_colocations() and
* pcmk__this_with_colocations() functions, which may return relevant
* colocations involving the resource's ancestors as well.
*/
//!@{
//! This field should be treated as internal to Pacemaker
GList *rsc_cons_lhs; // List of pcmk__colocation_t*
GList *rsc_cons; // List of pcmk__colocation_t*
GList *rsc_location; // List of pe__location_t*
GList *actions; // List of pe_action_t*
GList *rsc_tickets; // List of rsc_ticket*
//!@}
pe_node_t *allocated_to;
pe_node_t *partial_migration_target;
pe_node_t *partial_migration_source;
GList *running_on; /* pe_node_t* */
GHashTable *known_on; /* pe_node_t* */
GHashTable *allowed_nodes; /* pe_node_t* */
enum rsc_role_e role;
enum rsc_role_e next_role;
GHashTable *meta;
GHashTable *parameters; //! \deprecated Use pe_rsc_params() instead
GHashTable *utilization;
GList *children; /* pe_resource_t* */
GList *dangling_migrations; /* pe_node_t* */
pe_resource_t *container;
GList *fillers;
// @COMPAT These should be made const at next API compatibility break
pe_node_t *pending_node; // Node on which pending_task is happening
pe_node_t *lock_node; // Resource is shutdown-locked to this node
time_t lock_time; // When shutdown lock started
/* Resource parameters may have node-attribute-based rules, which means the
* values can vary by node. This table is a cache of parameter name/value
* tables for each node (as needed). Use pe_rsc_params() to get the table
* for a given node.
*/
GHashTable *parameter_cache; // Key = node name, value = parameters table
};
struct pe_action_s {
int id;
int priority;
pe_resource_t *rsc;
pe_node_t *node;
xmlNode *op_entry;
char *task;
char *uuid;
char *cancel_task;
char *reason;
enum pe_action_flags flags;
enum rsc_start_requirement needs;
enum action_fail_response on_fail;
enum rsc_role_e fail_role;
GHashTable *meta;
GHashTable *extra;
/*
* These two variables are associated with the constraint logic that
* requires one or more "before" actions to be runnable before this
* action is allowed to execute.
*
* These variables are used with features such as 'clone-min', which
* requires at least X cloned instances to be running before an order
* dependency can run. Another option that uses this is
* 'require-all=false' in ordering constraints. This option
* says "only require one instance of a resource to start before
* allowing dependencies to start" -- basically, require-all=false is
* the same as clone-min=1.
*/
/* current number of known runnable actions in the before list. */
int runnable_before;
/* the number of "before" runnable actions required for this action
* to be considered runnable */
int required_runnable_before;
GList *actions_before; /* pe_action_wrapper_t* */
GList *actions_after; /* pe_action_wrapper_t* */
/* Some of the above fields could be moved to the details,
* except for API backward compatibility.
*/
void *action_details; // varies by type of action
};
typedef struct pe_ticket_s {
char *id;
gboolean granted;
time_t last_granted;
gboolean standby;
GHashTable *state;
} pe_ticket_t;
typedef struct pe_tag_s {
char *id;
GList *refs;
} pe_tag_t;
//! Internal tracking for transition graph creation
enum pe_link_state {
pe_link_not_dumped, //! Internal tracking for transition graph creation
pe_link_dumped, //! Internal tracking for transition graph creation
pe_link_dup, //! \deprecated No longer used by Pacemaker
};
enum pe_discover_e {
pe_discover_always = 0,
pe_discover_never,
pe_discover_exclusive,
};
/* *INDENT-OFF* */
enum pe_ordering {
pe_order_none = 0x0, /* deleted */
pe_order_optional = 0x1, /* pure ordering, nothing implied */
pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */
pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */
pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */
pe_order_promoted_implies_first = 0x40, /* If 'then' is required and then's rsc is promoted, ensure 'first' becomes required too */
/* first requires then to be both runnable and migrate runnable. */
pe_order_implies_first_migratable = 0x80,
pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */
pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */
pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX',
* ensure instances of 'then' on 'nodeX' are too.
* Only really useful if 'then' is a clone and 'first' is not
*/
pe_order_probe = 0x800, /* If 'first->rsc' is
* - running but about to stop, ignore the constraint
* - otherwise, behave as runnable_left
*/
pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */
pe_order_stonith_stop = 0x2000, //!< \deprecated Will be removed in a future release
pe_order_serialize_only = 0x4000, /* serialize */
pe_order_same_node = 0x8000, /* applies only if 'first' and 'then' are on same node */
pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */
pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */
pe_order_asymmetrical = 0x100000, /* Indicates asymmetrical one way ordering constraint. */
pe_order_load = 0x200000, /* Only relevant if... */
pe_order_one_or_more = 0x400000, /* 'then' is runnable only if one or more of its dependencies are too */
pe_order_anti_colocation = 0x800000,
pe_order_preserve = 0x1000000, /* Hack for breaking user ordering constraints with container resources */
pe_order_then_cancels_first = 0x2000000, // if 'then' becomes required, 'first' becomes optional
pe_order_trace = 0x4000000, /* test marker */
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
// \deprecated Use pe_order_promoted_implies_first instead
pe_order_implies_first_master = pe_order_promoted_implies_first,
#endif
};
/* *INDENT-ON* */
typedef struct pe_action_wrapper_s {
enum pe_ordering type;
enum pe_link_state state;
pe_action_t *action;
} pe_action_wrapper_t;
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include <crm/pengine/pe_types_compat.h>
#endif
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_PENGINE_PE_TYPES__H
diff --git a/include/crm/pengine/pe_types_compat.h b/include/crm/pengine/pe_types_compat.h
index 1014df231e..0a6c399526 100644
--- a/include/crm/pengine/pe_types_compat.h
+++ b/include/crm/pengine/pe_types_compat.h
@@ -1,191 +1,194 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H
# define PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H
#include <crm/pengine/pe_types.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief Deprecated Pacemaker scheduler API
* \ingroup pengine
* \deprecated Do not include this header directly. The scheduler APIs in this
* header, and the header itself, will be removed in a future
* release.
*/
//! \deprecated Use pcmk_rsc_removed instead
#define pe_rsc_orphan pcmk_rsc_removed
//! \deprecated Use pcmk_rsc_managed instead
#define pe_rsc_managed pcmk_rsc_managed
//! \deprecated Use pcmk_rsc_blocked instead
#define pe_rsc_block pcmk_rsc_blocked
//! \deprecated Use pcmk_rsc_removed_filler instead
#define pe_rsc_orphan_container_filler pcmk_rsc_removed_filler
//! \deprecated Use pcmk_rsc_notify instead
#define pe_rsc_notify pcmk_rsc_notify
//! \deprecated Use pcmk_rsc_unique instead
#define pe_rsc_unique pcmk_rsc_unique
//! \deprecated Use pcmk_rsc_fence_device instead
#define pe_rsc_fence_device pcmk_rsc_fence_device
//! \deprecated Use pcmk_rsc_promotable instead
#define pe_rsc_promotable pcmk_rsc_promotable
//! \deprecated Use pcmk_rsc_unassigned instead
#define pe_rsc_provisional pcmk_rsc_unassigned
//! \deprecated Use pcmk_rsc_assigning instead
#define pe_rsc_allocating pcmk_rsc_assigning
//! \deprecated Use pcmk_rsc_updating_nodes instead
#define pe_rsc_merging pcmk_rsc_updating_nodes
//! \deprecated Use pcmk_rsc_restarting instead
#define pe_rsc_restarting pcmk_rsc_restarting
//! \deprecated Use pcmk_rsc_stop_if_failed instead
#define pe_rsc_stop pcmk_rsc_stop_if_failed
//! \deprecated Use pcmk_rsc_reload instead
#define pe_rsc_reload pcmk_rsc_reload
//! \deprecated Use pcmk_rsc_remote_nesting_allowed instead
#define pe_rsc_allow_remote_remotes pcmk_rsc_remote_nesting_allowed
+//! \deprecated Use pcmk_rsc_critical instead
+#define pe_rsc_critical pcmk_rsc_critical
+
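/* Illustrative note (a sketch, not part of the upstream change): because the
* macro above merely aliases the old name to the new flag, existing callers
* keep compiling unchanged while new code can use the pcmk_rsc_* spelling
* directly. After preprocessing, these two checks are equivalent:
*
*     if (pcmk_is_set(rsc->flags, pe_rsc_critical))    // legacy spelling
*     if (pcmk_is_set(rsc->flags, pcmk_rsc_critical))  // current spelling
*/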
//! \deprecated Use pcmk_sched_quorate instead
#define pe_flag_have_quorum pcmk_sched_quorate
//! \deprecated Use pcmk_sched_symmetric_cluster instead
#define pe_flag_symmetric_cluster pcmk_sched_symmetric_cluster
//! \deprecated Use pcmk_sched_in_maintenance instead
#define pe_flag_maintenance_mode pcmk_sched_in_maintenance
//! \deprecated Use pcmk_sched_fencing_enabled instead
#define pe_flag_stonith_enabled pcmk_sched_fencing_enabled
//! \deprecated Use pcmk_sched_have_fencing instead
#define pe_flag_have_stonith_resource pcmk_sched_have_fencing
//! \deprecated Use pcmk_sched_enable_unfencing instead
#define pe_flag_enable_unfencing pcmk_sched_enable_unfencing
//! \deprecated Use pcmk_sched_concurrent_fencing instead
#define pe_flag_concurrent_fencing pcmk_sched_concurrent_fencing
//! \deprecated Use pcmk_sched_stop_removed_resources instead
#define pe_flag_stop_rsc_orphans pcmk_sched_stop_removed_resources
//! \deprecated Use pcmk_sched_cancel_removed_actions instead
#define pe_flag_stop_action_orphans pcmk_sched_cancel_removed_actions
//! \deprecated Use pcmk_sched_stop_all instead
#define pe_flag_stop_everything pcmk_sched_stop_all
//! \deprecated Use pcmk_sched_start_failure_fatal instead
#define pe_flag_start_failure_fatal pcmk_sched_start_failure_fatal
//! \deprecated Do not use
#define pe_flag_remove_after_stop pcmk_sched_remove_after_stop
//! \deprecated Use pcmk_sched_startup_fencing instead
#define pe_flag_startup_fencing pcmk_sched_startup_fencing
//! \deprecated Use pcmk_sched_shutdown_lock instead
#define pe_flag_shutdown_lock pcmk_sched_shutdown_lock
//! \deprecated Use pcmk_sched_probe_resources instead
#define pe_flag_startup_probes pcmk_sched_probe_resources
//! \deprecated Use pcmk_sched_have_status instead
#define pe_flag_have_status pcmk_sched_have_status
//! \deprecated Use pcmk_sched_have_remote_nodes instead
#define pe_flag_have_remote_nodes pcmk_sched_have_remote_nodes
//! \deprecated Use pcmk_sched_location_only instead
#define pe_flag_quick_location pcmk_sched_location_only
//! \deprecated Use pcmk_sched_sanitized instead
#define pe_flag_sanitized pcmk_sched_sanitized
//! \deprecated Do not use
#define pe_flag_stdout (1ULL << 22)
//! \deprecated Use pcmk_sched_no_counts instead
#define pe_flag_no_counts pcmk_sched_no_counts
//! \deprecated Use pcmk_sched_no_compat instead
#define pe_flag_no_compat pcmk_sched_no_compat
//! \deprecated Use pcmk_sched_output_scores instead
#define pe_flag_show_scores pcmk_sched_output_scores
//! \deprecated Use pcmk_sched_show_utilization instead
#define pe_flag_show_utilization pcmk_sched_show_utilization
//! \deprecated Use pcmk_sched_validate_only instead
#define pe_flag_check_config pcmk_sched_validate_only
//!@{
//! \deprecated Do not use (unused by Pacemaker)
enum pe_graph_flags {
pe_graph_none = 0x00000,
pe_graph_updated_first = 0x00001,
pe_graph_updated_then = 0x00002,
pe_graph_disable = 0x00004,
};
//!@}
//!@{
//! \deprecated Do not use
enum pe_check_parameters {
pe_check_last_failure,
pe_check_active,
};
//!@}
//!< \deprecated Use pe_action_t instead
typedef struct pe_action_s action_t;
//!< \deprecated Use pe_action_wrapper_t instead
typedef struct pe_action_wrapper_s action_wrapper_t;
//!< \deprecated Use pe_node_t instead
typedef struct pe_node_s node_t;
//!< \deprecated Use enum pe_quorum_policy instead
typedef enum pe_quorum_policy no_quorum_policy_t;
//!< \deprecated use pe_resource_t instead
typedef struct pe_resource_s resource_t;
//!< \deprecated Use pe_tag_t instead
typedef struct pe_tag_s tag_t;
//!< \deprecated Use pe_ticket_t instead
typedef struct pe_ticket_s ticket_t;
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H
diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c
index 887d439d68..9af6ce8c13 100644
--- a/lib/pacemaker/pcmk_sched_colocation.c
+++ b/lib/pacemaker/pcmk_sched_colocation.c
@@ -1,1906 +1,1906 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/common/scheduler_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "crm/common/util.h"
#include "crm/common/xml_internal.h"
#include "crm/msg_xml.h"
#include "libpacemaker_private.h"
// Used to temporarily mark a node as unusable
#define INFINITY_HACK (INFINITY * -100)
/*!
* \internal
* \brief Compare two colocations according to priority
*
* Compare two colocations according to the order in which they should be
* considered, based on either their dependent resources or their primary
* resources -- preferring (in order):
* * Colocation that is not \c NULL
* * Colocation whose resource has higher priority
* * Colocation whose resource is of a higher-level variant
* (bundle > clone > group > primitive)
* * Colocation whose resource is promotable, if both are clones
* * Colocation whose resource has lower ID in lexicographic order
*
* \param[in] colocation1 First colocation to compare
* \param[in] colocation2 Second colocation to compare
* \param[in] dependent If \c true, compare colocations by dependent
* priority; otherwise compare them by primary priority
*
* \return A negative number if \p colocation1 should be considered first,
* a positive number if \p colocation2 should be considered first,
* or 0 if order doesn't matter
*/
static gint
cmp_colocation_priority(const pcmk__colocation_t *colocation1,
const pcmk__colocation_t *colocation2, bool dependent)
{
const pe_resource_t *rsc1 = NULL;
const pe_resource_t *rsc2 = NULL;
if (colocation1 == NULL) {
return 1;
}
if (colocation2 == NULL) {
return -1;
}
if (dependent) {
rsc1 = colocation1->dependent;
rsc2 = colocation2->dependent;
CRM_ASSERT(colocation1->primary != NULL);
} else {
rsc1 = colocation1->primary;
rsc2 = colocation2->primary;
CRM_ASSERT(colocation1->dependent != NULL);
}
CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
if (rsc1->priority > rsc2->priority) {
return -1;
}
if (rsc1->priority < rsc2->priority) {
return 1;
}
// Process clones before primitives and groups
if (rsc1->variant > rsc2->variant) {
return -1;
}
if (rsc1->variant < rsc2->variant) {
return 1;
}
/* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable
* clones (probably unnecessary, but avoids having to update regression
* tests)
*/
if (rsc1->variant == pcmk_rsc_variant_clone) {
if (pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
&& !pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return -1;
}
if (!pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
&& pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return 1;
}
}
return strcmp(rsc1->id, rsc2->id);
}
/*!
* \internal
* \brief Compare two colocations according to priority based on dependents
*
* Compare two colocations according to the order in which they should be
* considered, based on their dependent resources -- preferring (in order):
* * Colocation that is not \c NULL
* * Colocation whose resource has higher priority
* * Colocation whose resource is of a higher-level variant
* (bundle > clone > group > primitive)
* * Colocation whose resource is promotable, if both are clones
* * Colocation whose resource has lower ID in lexicographic order
*
* \param[in] a First colocation to compare
* \param[in] b Second colocation to compare
*
* \return A negative number if \p a should be considered first,
* a positive number if \p b should be considered first,
* or 0 if order doesn't matter
*/
static gint
cmp_dependent_priority(gconstpointer a, gconstpointer b)
{
return cmp_colocation_priority(a, b, true);
}
/*!
* \internal
* \brief Compare two colocations according to priority based on primaries
*
* Compare two colocations according to the order in which they should be
* considered, based on their primary resources -- preferring (in order):
* * Colocation that is not \c NULL
* * Colocation whose primary has higher priority
* * Colocation whose primary is of a higher-level variant
* (bundle > clone > group > primitive)
* * Colocation whose primary is promotable, if both are clones
* * Colocation whose primary has lower ID in lexicographic order
*
* \param[in] a First colocation to compare
* \param[in] b Second colocation to compare
*
* \return A negative number if \p a should be considered first,
* a positive number if \p b should be considered first,
* or 0 if order doesn't matter
*/
static gint
cmp_primary_priority(gconstpointer a, gconstpointer b)
{
return cmp_colocation_priority(a, b, false);
}
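/* Illustrative example (hypothetical colocations, not from the upstream
* source): these comparators are intended for g_list_insert_sorted(). Given
* two colocations whose primaries have equal priority, the variant comparison
* breaks the tie, so a colocation whose primary is a clone sorts ahead of one
* whose primary is a primitive:
*
*     GList *list = NULL;
*     list = g_list_insert_sorted(list, &coloc_with_primitive_primary,
*                                 cmp_primary_priority);
*     list = g_list_insert_sorted(list, &coloc_with_clone_primary,
*                                 cmp_primary_priority);
*     // list->data now points to coloc_with_clone_primary
*
* Two primitives of equal priority would instead sort by ID, lexicographically.
*/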
/*!
* \internal
* \brief Add a "this with" colocation constraint to a sorted list
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The list will be sorted using cmp_primary_priority().
*/
void
pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
const pe_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
pe_rsc_trace(rsc,
"Adding colocation %s (%s with %s using %s @%s) to "
"'this with' list for %s",
colocation->id, colocation->dependent->id,
colocation->primary->id, colocation->node_attribute,
pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_primary_priority);
}
/*!
* \internal
* \brief Add a list of "this with" colocation constraints to a list
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The lists must be pre-sorted by cmp_primary_priority().
*/
void
pcmk__add_this_with_list(GList **list, GList *addition,
const pe_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (rsc != NULL));
pcmk__if_tracing(
{}, // Always add each colocation individually if tracing
{
if (*list == NULL) {
// Trivial case for efficiency if not tracing
*list = g_list_copy(addition);
return;
}
}
);
for (const GList *iter = addition; iter != NULL; iter = iter->next) {
pcmk__add_this_with(list, iter->data, rsc);
}
}
/*!
* \internal
* \brief Add a "with this" colocation constraint to a sorted list
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The list will be sorted using cmp_dependent_priority().
*/
void
pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
const pe_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
pe_rsc_trace(rsc,
"Adding colocation %s (%s with %s using %s @%s) to "
"'with this' list for %s",
colocation->id, colocation->dependent->id,
colocation->primary->id, colocation->node_attribute,
pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_dependent_priority);
}
/*!
* \internal
* \brief Add a list of "with this" colocation constraints to a list
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
* \param[in] rsc Resource whose colocations we're getting (for
* logging only)
*
* \note The lists must be pre-sorted by cmp_dependent_priority().
*/
void
pcmk__add_with_this_list(GList **list, GList *addition,
const pe_resource_t *rsc)
{
CRM_ASSERT((list != NULL) && (rsc != NULL));
pcmk__if_tracing(
{}, // Always add each colocation individually if tracing
{
if (*list == NULL) {
// Trivial case for efficiency if not tracing
*list = g_list_copy(addition);
return;
}
}
);
for (const GList *iter = addition; iter != NULL; iter = iter->next) {
pcmk__add_with_this(list, iter->data, rsc);
}
}
/*!
* \internal
* \brief Add orderings necessary for an anti-colocation constraint
*
* \param[in,out] first_rsc One resource in an anti-colocation
* \param[in] first_role Anti-colocation role of \p first_rsc
* \param[in] then_rsc Other resource in the anti-colocation
* \param[in] then_role Anti-colocation role of \p then_rsc
*/
static void
anti_colocation_order(pe_resource_t *first_rsc, int first_role,
pe_resource_t *then_rsc, int then_role)
{
const char *first_tasks[] = { NULL, NULL };
const char *then_tasks[] = { NULL, NULL };
/* Actions to make first_rsc lose first_role */
if (first_role == pcmk_role_promoted) {
first_tasks[0] = PCMK_ACTION_DEMOTE;
} else {
first_tasks[0] = PCMK_ACTION_STOP;
if (first_role == pcmk_role_unpromoted) {
first_tasks[1] = PCMK_ACTION_PROMOTE;
}
}
/* Actions to make then_rsc gain then_role */
if (then_role == pcmk_role_promoted) {
then_tasks[0] = PCMK_ACTION_PROMOTE;
} else {
then_tasks[0] = PCMK_ACTION_START;
if (then_role == pcmk_role_unpromoted) {
then_tasks[1] = PCMK_ACTION_DEMOTE;
}
}
for (int first_lpc = 0;
(first_lpc <= 1) && (first_tasks[first_lpc] != NULL); first_lpc++) {
for (int then_lpc = 0;
(then_lpc <= 1) && (then_tasks[then_lpc] != NULL); then_lpc++) {
pcmk__order_resource_actions(first_rsc, first_tasks[first_lpc],
then_rsc, then_tasks[then_lpc],
pe_order_anti_colocation);
}
}
}
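/* Illustrative sketch (hypothetical resources, not from the upstream source):
* for an anti-colocation where first_rsc "A" must lose the promoted role and
* then_rsc "B" must gain the unpromoted role, the nested loops above create
* these pe_order_anti_colocation orderings:
*
*     A demote -> B start
*     A demote -> B demote
*/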
/*!
* \internal
* \brief Add a new colocation constraint to a cluster working set
*
* \param[in] id XML ID for this constraint
* \param[in] node_attr Colocate by this attribute (NULL for #uname)
* \param[in] score Constraint score
* \param[in,out] dependent Resource to be colocated
* \param[in,out] primary Resource to colocate \p dependent with
* \param[in] dependent_role Current role of \p dependent
* \param[in] primary_role Current role of \p primary
* \param[in] flags Group of enum pcmk__coloc_flags
*/
void
pcmk__new_colocation(const char *id, const char *node_attr, int score,
pe_resource_t *dependent, pe_resource_t *primary,
const char *dependent_role, const char *primary_role,
uint32_t flags)
{
pcmk__colocation_t *new_con = NULL;
CRM_CHECK(id != NULL, return);
if ((dependent == NULL) || (primary == NULL)) {
pcmk__config_err("Ignoring colocation '%s' because resource "
"does not exist", id);
return;
}
if (score == 0) {
pe_rsc_trace(dependent,
"Ignoring colocation '%s' (%s with %s) because score is 0",
id, dependent->id, primary->id);
return;
}
new_con = calloc(1, sizeof(pcmk__colocation_t));
CRM_ASSERT(new_con != NULL);
if (pcmk__str_eq(dependent_role, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
dependent_role = PCMK__ROLE_UNKNOWN;
}
if (pcmk__str_eq(primary_role, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
primary_role = PCMK__ROLE_UNKNOWN;
}
new_con->id = id;
new_con->dependent = dependent;
new_con->primary = primary;
new_con->score = score;
new_con->dependent_role = text2role(dependent_role);
new_con->primary_role = text2role(primary_role);
new_con->node_attribute = pcmk__s(node_attr, CRM_ATTR_UNAME);
new_con->flags = flags;
pcmk__add_this_with(&(dependent->rsc_cons), new_con, dependent);
pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con, primary);
dependent->cluster->colocation_constraints = g_list_prepend(
dependent->cluster->colocation_constraints, new_con);
if (score <= -INFINITY) {
anti_colocation_order(dependent, new_con->dependent_role, primary,
new_con->primary_role);
anti_colocation_order(primary, new_con->primary_role, dependent,
new_con->dependent_role);
}
}
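/* Illustrative example (hypothetical IDs, not from the upstream source): a CIB
* entry such as
*
*     <rsc_colocation id="web-with-ip" rsc="website" with-rsc="vip"
*                     score="INFINITY"/>
*
* is eventually unpacked into a call roughly equivalent to
*
*     pcmk__new_colocation("web-with-ip", NULL, INFINITY, website, vip,
*                          NULL, NULL, flags);
*
* where website and vip are the pe_resource_t objects for those IDs, and flags
* is pcmk__coloc_explicit, possibly OR'd with pcmk__coloc_influence depending
* on the dependent's "critical" setting (see unpack_influence() below).
*/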
/*!
* \internal
* \brief Return the boolean influence corresponding to configuration
*
* \param[in] coloc_id Colocation XML ID (for error logging)
* \param[in] rsc Resource involved in constraint (for default)
* \param[in] influence_s String value of influence option
*
* \return pcmk__coloc_influence if \p influence_s evaluates to true, or if it
* is NULL or invalid and the resource's "critical" option evaluates to
* true; otherwise pcmk__coloc_none
*/
static uint32_t
unpack_influence(const char *coloc_id, const pe_resource_t *rsc,
const char *influence_s)
{
if (influence_s != NULL) {
int influence_i = 0;
if (crm_str_to_boolean(influence_s, &influence_i) < 0) {
pcmk__config_err("Constraint '%s' has invalid value for "
XML_COLOC_ATTR_INFLUENCE " (using default)",
coloc_id);
} else {
return (influence_i == 0)? pcmk__coloc_none : pcmk__coloc_influence;
}
}
- if (pcmk_is_set(rsc->flags, pe_rsc_critical)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_critical)) {
return pcmk__coloc_influence;
}
return pcmk__coloc_none;
}
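/* Illustrative example (hypothetical IDs, not from the upstream source): with
* a constraint such as
*
*     <rsc_colocation id="db-with-ip" rsc="db" with-rsc="vip" score="100"
*                     influence="false"/>
*
* unpack_influence() returns pcmk__coloc_none, so the dependent's preferences
* and failures do not influence where the primary runs. If the influence
* attribute is omitted or invalid, the result instead depends on whether the
* dependent's "critical" meta-attribute evaluates to true (pcmk_rsc_critical).
*/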
static void
unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
const char *influence_s, pe_working_set_t *data_set)
{
xmlNode *xml_rsc = NULL;
pe_resource_t *other = NULL;
pe_resource_t *resource = NULL;
const char *set_id = ID(set);
const char *role = crm_element_value(set, "role");
bool with_previous = false;
int local_score = score;
bool sequential = false;
uint32_t flags = pcmk__coloc_none;
const char *xml_rsc_id = NULL;
const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE);
if (score_s) {
local_score = char2score(score_s);
}
if (local_score == 0) {
crm_trace("Ignoring colocation '%s' for set '%s' because score is 0",
coloc_id, set_id);
return;
}
/* @COMPAT The deprecated "ordering" attribute specifies whether resources
* in a positive-score set are colocated with the previous or next resource.
*/
if (pcmk__str_eq(crm_element_value(set, "ordering"), "group",
pcmk__str_null_matches|pcmk__str_casei)) {
with_previous = true;
} else {
pe_warn_once(pe_wo_set_ordering,
"Support for 'ordering' other than 'group' in "
XML_CONS_TAG_RSC_SET " (such as %s) is deprecated and "
"will be removed in a future release", set_id);
}
if ((pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok)
&& !sequential) {
return;
}
if (local_score > 0) {
for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xml_rsc_id = ID(xml_rsc);
resource = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
if (resource == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring %s and later resources in set %s: "
"No such resource", xml_rsc_id, set_id);
return;
}
if (other != NULL) {
flags = pcmk__coloc_explicit
| unpack_influence(coloc_id, resource, influence_s);
if (with_previous) {
pe_rsc_trace(resource, "Colocating %s with %s in set %s",
resource->id, other->id, set_id);
pcmk__new_colocation(set_id, NULL, local_score, resource,
other, role, role, flags);
} else {
pe_rsc_trace(resource, "Colocating %s with %s in set %s",
other->id, resource->id, set_id);
pcmk__new_colocation(set_id, NULL, local_score, other,
resource, role, role, flags);
}
}
other = resource;
}
} else {
/* Anti-colocating with every prior resource is
* the only way to ensure the intuitive result
* (i.e. that no one in the set can run with anyone else in the set)
*/
for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xmlNode *xml_rsc_with = NULL;
xml_rsc_id = ID(xml_rsc);
resource = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
if (resource == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring %s and later resources in set %s: "
"No such resource", xml_rsc_id, set_id);
return;
}
flags = pcmk__coloc_explicit
| unpack_influence(coloc_id, resource, influence_s);
for (xml_rsc_with = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc_with != NULL;
xml_rsc_with = crm_next_same_xml(xml_rsc_with)) {
xml_rsc_id = ID(xml_rsc_with);
if (pcmk__str_eq(resource->id, xml_rsc_id, pcmk__str_none)) {
break;
}
other = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
CRM_ASSERT(other != NULL); // We already processed it
pcmk__new_colocation(set_id, NULL, local_score,
resource, other, role, role, flags);
}
}
}
}
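/* Illustrative example (hypothetical IDs, not from the upstream source): for a
* positive-score set
*
*     <resource_set id="set-1" sequential="true">
*       <resource_ref id="A"/>
*       <resource_ref id="B"/>
*       <resource_ref id="C"/>
*     </resource_set>
*
* the first branch above colocates each member with the previous one, creating
* "B with A" and "C with B" (the direction is reversed for the deprecated
* non-"group" ordering). With a negative score, the second branch instead
* anti-colocates every member with every earlier member: "B with A",
* "C with A", and "C with B".
*/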
/*!
* \internal
* \brief Colocate two resource sets relative to each other
*
* \param[in] id Colocation XML ID
* \param[in] set1 Dependent set
* \param[in] set2 Primary set
* \param[in] score Colocation score
* \param[in] influence_s Value of colocation's "influence" attribute
* \param[in,out] data_set Cluster working set
*/
static void
colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
int score, const char *influence_s,
pe_working_set_t *data_set)
{
xmlNode *xml_rsc = NULL;
pe_resource_t *rsc_1 = NULL;
pe_resource_t *rsc_2 = NULL;
const char *xml_rsc_id = NULL;
const char *role_1 = crm_element_value(set1, "role");
const char *role_2 = crm_element_value(set2, "role");
int rc = pcmk_rc_ok;
bool sequential = false;
uint32_t flags = pcmk__coloc_none;
if (score == 0) {
crm_trace("Ignoring colocation '%s' between sets %s and %s "
"because score is 0", id, ID(set1), ID(set2));
return;
}
rc = pcmk__xe_get_bool_attr(set1, "sequential", &sequential);
if ((rc != pcmk_rc_ok) || sequential) {
// Get the first one
xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
if (xml_rsc != NULL) {
xml_rsc_id = ID(xml_rsc);
rsc_1 = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
if (rsc_1 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s with set %s "
"because first resource %s not found",
ID(set1), ID(set2), xml_rsc_id);
return;
}
}
}
rc = pcmk__xe_get_bool_attr(set2, "sequential", &sequential);
if ((rc != pcmk_rc_ok) || sequential) {
// Get the last one
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xml_rsc_id = ID(xml_rsc);
}
rsc_2 = pcmk__find_constraint_resource(data_set->resources, xml_rsc_id);
if (rsc_2 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s with set %s "
"because last resource %s not found",
ID(set1), ID(set2), xml_rsc_id);
return;
}
}
if ((rsc_1 != NULL) && (rsc_2 != NULL)) { // Both sets are sequential
flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2,
flags);
} else if (rsc_1 != NULL) { // Only set1 is sequential
flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xml_rsc_id = ID(xml_rsc);
rsc_2 = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
if (rsc_2 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring set %s colocation with resource %s "
"in set %s: No such resource",
ID(set1), xml_rsc_id, ID(set2));
continue;
}
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
role_2, flags);
}
} else if (rsc_2 != NULL) { // Only set2 is sequential
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xml_rsc_id = ID(xml_rsc);
rsc_1 = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
if (rsc_1 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s resource %s "
"with set %s: No such resource",
ID(set1), xml_rsc_id, ID(set2));
continue;
}
flags = pcmk__coloc_explicit
| unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
role_2, flags);
}
} else { // Neither set is sequential
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xmlNode *xml_rsc_2 = NULL;
xml_rsc_id = ID(xml_rsc);
rsc_1 = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
if (rsc_1 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s resource %s "
"with set %s: No such resource",
ID(set1), xml_rsc_id, ID(set2));
continue;
}
flags = pcmk__coloc_explicit
| unpack_influence(id, rsc_1, influence_s);
for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL;
xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
xml_rsc_id = ID(xml_rsc_2);
rsc_2 = pcmk__find_constraint_resource(data_set->resources,
xml_rsc_id);
if (rsc_2 == NULL) {
// Should be possible only with validation disabled
pcmk__config_err("Ignoring colocation of set %s resource "
"%s with set %s resource %s: No such "
"resource", ID(set1), ID(xml_rsc),
ID(set2), xml_rsc_id);
continue;
}
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2,
role_1, role_2, flags);
}
}
}
}
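/* Illustrative note (not from the upstream source): when both sets are
* sequential, only a single colocation is created, between the first member of
* the dependent set and the last member of the primary set. When neither set
* is sequential, a colocation is created for every pair, so two sets of three
* members each yield nine constraints.
*/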
static void
unpack_simple_colocation(xmlNode *xml_obj, const char *id,
const char *influence_s, pe_working_set_t *data_set)
{
int score_i = 0;
uint32_t flags = pcmk__coloc_none;
const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
const char *dependent_id = crm_element_value(xml_obj,
XML_COLOC_ATTR_SOURCE);
const char *primary_id = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET);
const char *dependent_role = crm_element_value(xml_obj,
XML_COLOC_ATTR_SOURCE_ROLE);
const char *primary_role = crm_element_value(xml_obj,
XML_COLOC_ATTR_TARGET_ROLE);
const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR);
const char *primary_instance = NULL;
const char *dependent_instance = NULL;
pe_resource_t *primary = NULL;
pe_resource_t *dependent = NULL;
primary = pcmk__find_constraint_resource(data_set->resources, primary_id);
dependent = pcmk__find_constraint_resource(data_set->resources,
dependent_id);
// @COMPAT: Deprecated since 2.1.5
primary_instance = crm_element_value(xml_obj,
XML_COLOC_ATTR_TARGET_INSTANCE);
dependent_instance = crm_element_value(xml_obj,
XML_COLOC_ATTR_SOURCE_INSTANCE);
if (dependent_instance != NULL) {
pe_warn_once(pe_wo_coloc_inst,
"Support for " XML_COLOC_ATTR_SOURCE_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
if (primary_instance != NULL) {
pe_warn_once(pe_wo_coloc_inst,
"Support for " XML_COLOC_ATTR_TARGET_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
if (dependent == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not exist", id, dependent_id);
return;
} else if (primary == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not exist", id, primary_id);
return;
} else if ((dependent_instance != NULL) && !pe_rsc_is_clone(dependent)) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"is not a clone but instance '%s' was requested",
id, dependent_id, dependent_instance);
return;
} else if ((primary_instance != NULL) && !pe_rsc_is_clone(primary)) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"is not a clone but instance '%s' was requested",
id, primary_id, primary_instance);
return;
}
if (dependent_instance != NULL) {
dependent = find_clone_instance(dependent, dependent_instance);
if (dependent == NULL) {
pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
"does not have an instance '%s'",
id, dependent_id, dependent_instance);
return;
}
}
if (primary_instance != NULL) {
primary = find_clone_instance(primary, primary_instance);
if (primary == NULL) {
pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
"does not have an instance '%s'",
"'%s'", id, primary_id, primary_instance);
return;
}
}
if (pcmk__xe_attr_is_true(xml_obj, XML_CONS_ATTR_SYMMETRICAL)) {
pcmk__config_warn("The colocation constraint '"
XML_CONS_ATTR_SYMMETRICAL
"' attribute has been removed");
}
if (score) {
score_i = char2score(score);
}
flags = pcmk__coloc_explicit | unpack_influence(id, dependent, influence_s);
pcmk__new_colocation(id, attr, score_i, dependent, primary,
dependent_role, primary_role, flags);
}
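/* Illustrative example (hypothetical IDs, not from the upstream source): a
* role- and attribute-based constraint such as
*
*     <rsc_colocation id="app-with-promoted-db" rsc="app" with-rsc="db-clone"
*                     with-rsc-role="Promoted" score="INFINITY"
*                     node-attribute="site"/>
*
* reaches pcmk__new_colocation() with dependent_role=NULL,
* primary_role="Promoted", and node_attr="site", meaning app must run on a
* node whose "site" attribute matches that of the node running the promoted
* db-clone instance.
*/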
// \return Standard Pacemaker return code
static int
unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
pe_working_set_t *data_set)
{
const char *id = NULL;
const char *dependent_id = NULL;
const char *primary_id = NULL;
const char *dependent_role = NULL;
const char *primary_role = NULL;
pe_resource_t *dependent = NULL;
pe_resource_t *primary = NULL;
pe_tag_t *dependent_tag = NULL;
pe_tag_t *primary_tag = NULL;
xmlNode *dependent_set = NULL;
xmlNode *primary_set = NULL;
bool any_sets = false;
*expanded_xml = NULL;
CRM_CHECK(xml_obj != NULL, return EINVAL);
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
*expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_colocation");
return pcmk_rc_ok;
}
dependent_id = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
primary_id = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET);
if ((dependent_id == NULL) || (primary_id == NULL)) {
return pcmk_rc_ok;
}
if (!pcmk__valid_resource_or_tag(data_set, dependent_id, &dependent,
&dependent_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, dependent_id);
return pcmk_rc_unpack_error;
}
if (!pcmk__valid_resource_or_tag(data_set, primary_id, &primary,
&primary_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, primary_id);
return pcmk_rc_unpack_error;
}
if ((dependent != NULL) && (primary != NULL)) {
/* Neither side references any template/tag. */
return pcmk_rc_ok;
}
if ((dependent_tag != NULL) && (primary_tag != NULL)) {
// A colocation constraint between two templates/tags makes no sense
pcmk__config_err("Ignoring constraint '%s' because two templates or "
"tags cannot be colocated", id);
return pcmk_rc_unpack_error;
}
dependent_role = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE);
primary_role = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE);
*expanded_xml = copy_xml(xml_obj);
// Convert dependent's template/tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &dependent_set, XML_COLOC_ATTR_SOURCE,
true, data_set)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
}
if (dependent_set != NULL) {
if (dependent_role != NULL) {
// Move "rsc-role" into converted resource_set as "role"
crm_xml_add(dependent_set, "role", dependent_role);
xml_remove_prop(*expanded_xml, XML_COLOC_ATTR_SOURCE_ROLE);
}
any_sets = true;
}
// Convert primary's template/tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &primary_set, XML_COLOC_ATTR_TARGET,
true, data_set)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
}
if (primary_set != NULL) {
if (primary_role != NULL) {
// Move "with-rsc-role" into converted resource_set as "role"
crm_xml_add(primary_set, "role", primary_role);
xml_remove_prop(*expanded_xml, XML_COLOC_ATTR_TARGET_ROLE);
}
any_sets = true;
}
if (any_sets) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_colocation");
} else {
free_xml(*expanded_xml);
*expanded_xml = NULL;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Parse a colocation constraint from XML into a cluster working set
*
* \param[in,out] xml_obj Colocation constraint XML to unpack
* \param[in,out] data_set Cluster working set to add constraint to
*/
void
pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
{
int score_i = 0;
xmlNode *set = NULL;
xmlNode *last = NULL;
xmlNode *orig_xml = NULL;
xmlNode *expanded_xml = NULL;
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
const char *score = NULL;
const char *influence_s = NULL;
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_DEPEND
" without " CRM_ATTR_ID);
return;
}
if (unpack_colocation_tags(xml_obj, &expanded_xml,
data_set) != pcmk_rc_ok) {
return;
}
if (expanded_xml != NULL) {
orig_xml = xml_obj;
xml_obj = expanded_xml;
}
score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
if (score != NULL) {
score_i = char2score(score);
}
influence_s = crm_element_value(xml_obj, XML_COLOC_ATTR_INFLUENCE);
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
set = crm_next_same_xml(set)) {
set = expand_idref(set, data_set->input);
if (set == NULL) { // Configuration error, message already logged
if (expanded_xml != NULL) {
free_xml(expanded_xml);
}
return;
}
if (pcmk__str_empty(ID(set))) {
pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_SET
" without " CRM_ATTR_ID);
continue;
}
unpack_colocation_set(set, score_i, id, influence_s, data_set);
if (last != NULL) {
colocate_rsc_sets(id, last, set, score_i, influence_s, data_set);
}
last = set;
}
if (expanded_xml) {
free_xml(expanded_xml);
xml_obj = orig_xml;
}
if (last == NULL) {
unpack_simple_colocation(xml_obj, id, influence_s, data_set);
}
}
/*!
* \internal
* \brief Make actions of a given type unrunnable for a given resource
*
* \param[in,out] rsc Resource whose actions should be blocked
* \param[in] task Name of action to block
* \param[in] reason Unrunnable start action causing the block
*/
static void
mark_action_blocked(pe_resource_t *rsc, const char *task,
const pe_resource_t *reason)
{
GList *iter = NULL;
char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
pe_action_t *action = iter->data;
if (pcmk_is_set(action->flags, pe_action_runnable)
&& pcmk__str_eq(action->task, task, pcmk__str_none)) {
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, reason_text, false);
pcmk__block_colocation_dependents(action);
pcmk__update_action_for_orderings(action, rsc->cluster);
}
}
// If parent resource can't perform an action, neither can any children
for (iter = rsc->children; iter != NULL; iter = iter->next) {
mark_action_blocked((pe_resource_t *) (iter->data), task, reason);
}
free(reason_text);
}
/*!
* \internal
* \brief If an action is unrunnable, block any relevant dependent actions
*
* If a given action is an unrunnable start or promote, block the start or
* promote actions of resources colocated with it, as appropriate to the
* colocations' configured roles.
*
* \param[in,out] action Action to check
*/
void
pcmk__block_colocation_dependents(pe_action_t *action)
{
GList *iter = NULL;
GList *colocations = NULL;
pe_resource_t *rsc = NULL;
bool is_start = false;
if (pcmk_is_set(action->flags, pe_action_runnable)) {
return; // Only unrunnable actions block dependents
}
is_start = pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none);
if (!is_start
&& !pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
return; // Only unrunnable starts and promotes block dependents
}
CRM_ASSERT(action->rsc != NULL); // Start and promote are resource actions
/* If this resource is part of a collective resource, dependents are blocked
* only if all instances of the collective are unrunnable, so check the
* collective resource.
*/
rsc = uber_parent(action->rsc);
if (rsc->parent != NULL) {
rsc = rsc->parent; // Bundle
}
// Colocation fails only if entire primary can't reach desired role
for (iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *child = iter->data;
pe_action_t *child_action = find_first_action(child->actions, NULL,
action->task, NULL);
if ((child_action == NULL)
|| pcmk_is_set(child_action->flags, pe_action_runnable)) {
crm_trace("Not blocking %s colocation dependents because "
"at least %s has runnable %s",
rsc->id, child->id, action->task);
return; // At least one child can reach desired role
}
}
crm_trace("Blocking %s colocation dependents due to unrunnable %s %s",
rsc->id, action->rsc->id, action->task);
// Check each colocation where this resource is primary
colocations = pcmk__with_this_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (colocation->score < INFINITY) {
continue; // Only mandatory colocations block dependent
}
/* If the primary can't start, the dependent can't reach its colocated
* role, regardless of what the primary or dependent colocation role is.
*
* If the primary can't be promoted, the dependent can't reach its
* colocated role if the primary's colocation role is promoted.
*/
if (!is_start && (colocation->primary_role != pcmk_role_promoted)) {
continue;
}
// Block the dependent from reaching its colocated role
if (colocation->dependent_role == pcmk_role_promoted) {
mark_action_blocked(colocation->dependent, PCMK_ACTION_PROMOTE,
action->rsc);
} else {
mark_action_blocked(colocation->dependent, PCMK_ACTION_START,
action->rsc);
}
}
g_list_free(colocations);
}
/*!
* \internal
* \brief Get the resource to use for role comparisons
*
* A bundle replica includes a container and possibly an instance of the bundled
* resource. The dependent in a "with bundle" colocation is colocated with a
* particular bundle container. However, if the colocation includes a role, then
* the role must be checked on the bundled resource instance inside the
* container. The container itself will never be promoted; the bundled resource
* may be.
*
* If the given resource is a bundle replica container, return the resource
* inside it, if any. Otherwise, return the resource itself.
*
* \param[in] rsc Resource to check
*
* \return Resource to use for role comparisons
*/
static const pe_resource_t *
get_resource_for_role(const pe_resource_t *rsc)
{
if (pcmk_is_set(rsc->flags, pe_rsc_replica_container)) {
const pe_resource_t *child = pe__get_rsc_in_container(rsc);
if (child != NULL) {
return child;
}
}
return rsc;
}
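/* Illustrative example (hypothetical IDs, not from the upstream source): for a
* "with db-bundle, role=Promoted" colocation, the role is checked against the
* bundled resource instance (e.g. db:0) running inside the replica's container
* (e.g. db-bundle-podman-0), because the container itself can never be
* promoted.
*/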
/*!
* \internal
* \brief Determine how a colocation constraint should affect a resource
*
* Colocation constraints have different effects at different points in the
* scheduler sequence. Initially, they affect a resource's location; once that
* is determined, then for promotable clones they can affect a resource
* instance's role; after both are determined, the constraints no longer matter.
* Given a specific colocation constraint, check what has been done so far to
* determine what should be affected at the current point in the scheduler.
*
* \param[in] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
* \param[in] preview If true, pretend resources have already been assigned
*
* \return How colocation constraint should be applied at this point
*/
enum pcmk__coloc_affects
pcmk__colocation_affects(const pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation, bool preview)
{
const pe_resource_t *dependent_role_rsc = NULL;
const pe_resource_t *primary_role_rsc = NULL;
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
if (!preview && pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
// Primary resource has not been assigned yet, so we can't do anything
return pcmk__coloc_affects_nothing;
}
dependent_role_rsc = get_resource_for_role(dependent);
primary_role_rsc = get_resource_for_role(primary);
if ((colocation->dependent_role >= pcmk_role_unpromoted)
&& (dependent_role_rsc->parent != NULL)
&& pcmk_is_set(dependent_role_rsc->parent->flags, pcmk_rsc_promotable)
&& !pcmk_is_set(dependent_role_rsc->flags, pcmk_rsc_unassigned)) {
/* This is a colocation by role, and the dependent is a promotable clone
* that has already been assigned, so the colocation should now affect
* the role.
*/
return pcmk__coloc_affects_role;
}
if (!preview && !pcmk_is_set(dependent->flags, pcmk_rsc_unassigned)) {
/* The dependent resource has already been through assignment, so the
* constraint no longer has any effect. Log an error if a mandatory
* colocation constraint has been violated.
*/
const pe_node_t *primary_node = primary->allocated_to;
if (dependent->allocated_to == NULL) {
crm_trace("Skipping colocation '%s': %s will not run anywhere",
colocation->id, dependent->id);
} else if (colocation->score >= INFINITY) {
// Dependent resource must colocate with primary resource
if (!pe__same_node(primary_node, dependent->allocated_to)) {
crm_err("%s must be colocated with %s but is not (%s vs. %s)",
dependent->id, primary->id,
pe__node_name(dependent->allocated_to),
pe__node_name(primary_node));
}
} else if (colocation->score <= -CRM_SCORE_INFINITY) {
// Dependent resource must anti-colocate with primary resource
if (pe__same_node(dependent->allocated_to, primary_node)) {
crm_err("%s and %s must be anti-colocated but are assigned "
"to the same node (%s)",
dependent->id, primary->id,
pe__node_name(primary_node));
}
}
return pcmk__coloc_affects_nothing;
}
if ((colocation->dependent_role != pcmk_role_unknown)
&& (colocation->dependent_role != dependent_role_rsc->next_role)) {
crm_trace("Skipping %scolocation '%s': dependent limited to %s role "
"but %s next role is %s",
((colocation->score < 0)? "anti-" : ""),
colocation->id, role2text(colocation->dependent_role),
dependent_role_rsc->id,
role2text(dependent_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
if ((colocation->primary_role != pcmk_role_unknown)
&& (colocation->primary_role != primary_role_rsc->next_role)) {
crm_trace("Skipping %scolocation '%s': primary limited to %s role "
"but %s next role is %s",
((colocation->score < 0)? "anti-" : ""),
colocation->id, role2text(colocation->primary_role),
primary_role_rsc->id, role2text(primary_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
return pcmk__coloc_affects_location;
}
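/* Illustrative walk-through (hypothetical resources A and B, not from any
* particular configuration): for a mandatory colocation "A with B" with no
* role restrictions, this function typically returns
* pcmk__coloc_affects_nothing while B is still unassigned, then
* pcmk__coloc_affects_location once B is assigned but A is not, and finally
* pcmk__coloc_affects_nothing again after A has been assigned (only the
* violation check and logging above remain). If the constraint instead limits
* the dependent's role and the dependent is an already-assigned instance of a
* promotable clone, pcmk__coloc_affects_role is returned so the constraint
* can influence promotion.
*/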
/*!
* \internal
* \brief Apply colocation to dependent for assignment purposes
*
* Update the allowed node scores of the dependent resource in a colocation,
* for the purposes of assigning it to a node.
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
*/
void
pcmk__apply_coloc_to_scores(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const char *attr = colocation->node_attribute;
const char *value = NULL;
GHashTable *work = NULL;
GHashTableIter iter;
pe_node_t *node = NULL;
if (primary->allocated_to != NULL) {
value = pcmk__colocation_node_attr(primary->allocated_to, attr,
primary);
} else if (colocation->score < 0) {
// Nothing to do (anti-colocation with something that is not running)
return;
}
work = pcmk__copy_node_table(dependent->allowed_nodes);
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (primary->allocated_to == NULL) {
node->weight = pcmk__add_scores(-colocation->score, node->weight);
pe_rsc_trace(dependent,
"Applied %s to %s score on %s (now %s after "
"subtracting %s because primary %s inactive)",
colocation->id, dependent->id, pe__node_name(node),
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score), primary->id);
continue;
}
if (pcmk__str_eq(pcmk__colocation_node_attr(node, attr, dependent),
value, pcmk__str_casei)) {
/* Add colocation score only if optional (or minus infinity). A
* mandatory colocation is a requirement rather than a preference,
* so we don't need to consider it for relative assignment purposes.
* The resource will simply be forbidden from running on the node if
* the primary isn't active there (via the condition above).
*/
if (colocation->score < CRM_SCORE_INFINITY) {
node->weight = pcmk__add_scores(colocation->score,
node->weight);
pe_rsc_trace(dependent,
"Applied %s to %s score on %s (now %s after "
"adding %s)",
colocation->id, dependent->id, pe__node_name(node),
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score));
}
continue;
}
if (colocation->score >= CRM_SCORE_INFINITY) {
/* Only mandatory colocations are relevant when the colocation
* attribute doesn't match, because an attribute not matching is not
* a negative preference -- the colocation is simply relevant only
* where it matches.
*/
node->weight = -CRM_SCORE_INFINITY;
pe_rsc_trace(dependent,
"Banned %s from %s because colocation %s attribute %s "
"does not match",
dependent->id, pe__node_name(node), colocation->id,
attr);
}
}
if ((colocation->score <= -INFINITY) || (colocation->score >= INFINITY)
|| pcmk__any_node_available(work)) {
g_hash_table_destroy(dependent->allowed_nodes);
dependent->allowed_nodes = work;
work = NULL;
} else {
pe_rsc_info(dependent,
"%s: Rolling back scores from %s (no available nodes)",
dependent->id, primary->id);
}
if (work != NULL) {
g_hash_table_destroy(work);
}
}
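/* Worked example with hypothetical scores: for an optional colocation
* "A with B" of score 1000 where B is assigned to node1, every allowed node
* of A whose colocation attribute matches node1's value has 1000 added via
* pcmk__add_scores(); non-matching nodes are left alone. If the colocation
* were mandatory, matching nodes would be left alone and non-matching nodes
* banned with -INFINITY. If B were unassigned (and the score non-negative),
* 1000 would instead be subtracted from every allowed node of A; an
* anti-colocation with an unassigned primary is a no-op. Either way, the
* updated table is kept only if the score is +/-INFINITY or at least one
* node remains available; otherwise the changes are rolled back.
*/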
/*!
* \internal
* \brief Apply colocation to dependent for role purposes
*
* Update the priority of the dependent resource in a colocation, for the
* purposes of selecting its role.
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
*/
void
pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const char *dependent_value = NULL;
const char *primary_value = NULL;
const char *attr = colocation->node_attribute;
int score_multiplier = 1;
const pe_resource_t *primary_role_rsc = NULL;
CRM_ASSERT((dependent != NULL) && (primary != NULL) &&
(colocation != NULL));
if ((primary->allocated_to == NULL) || (dependent->allocated_to == NULL)) {
return;
}
dependent_value = pcmk__colocation_node_attr(dependent->allocated_to, attr,
dependent);
primary_value = pcmk__colocation_node_attr(primary->allocated_to, attr,
primary);
primary_role_rsc = get_resource_for_role(primary);
if (!pcmk__str_eq(dependent_value, primary_value, pcmk__str_casei)) {
if ((colocation->score == INFINITY)
&& (colocation->dependent_role == pcmk_role_promoted)) {
dependent->priority = -INFINITY;
}
return;
}
if ((colocation->primary_role != pcmk_role_unknown)
&& (colocation->primary_role != primary_role_rsc->next_role)) {
return;
}
if (colocation->dependent_role == pcmk_role_unpromoted) {
score_multiplier = -1;
}
dependent->priority = pcmk__add_scores(score_multiplier * colocation->score,
dependent->priority);
pe_rsc_trace(dependent,
"Applied %s to %s promotion priority (now %s after %s %s)",
colocation->id, dependent->id,
pcmk_readable_score(dependent->priority),
((score_multiplier == 1)? "adding" : "subtracting"),
pcmk_readable_score(colocation->score));
}
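/* Worked example with hypothetical scores: if "A with B" has score 5000 and
* a dependent role of Promoted, and A and B are assigned to nodes with the
* same colocation attribute value, A's priority gains 5000, favoring
* promotion alongside B. With a dependent role of Unpromoted, the multiplier
* flips and 5000 is subtracted instead. If the attribute values differ and
* the colocation is mandatory with a Promoted dependent role, A's priority
* is set to -INFINITY so that instance cannot be promoted.
*/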
/*!
* \internal
* \brief Find score of highest-scored node that matches colocation attribute
*
* \param[in] rsc Resource whose allowed nodes should be searched
* \param[in] attr Colocation attribute name (must not be NULL)
* \param[in] value Colocation attribute value to require
*
* \return Highest score of any available allowed node for \p rsc whose value
*         for \p attr matches \p value, or -INFINITY if no such node exists
*/
static int
best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
const char *value)
{
GHashTableIter iter;
pe_node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
// Find best allowed node with matching attribute
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if ((node->weight > best_score)
&& pcmk__node_available(node, false, false)
&& pcmk__str_eq(value, pcmk__colocation_node_attr(node, attr, rsc),
pcmk__str_casei)) {
best_score = node->weight;
best_node = node->details->uname;
}
}
if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_none)) {
if (best_node == NULL) {
crm_info("No allowed node for %s matches node attribute %s=%s",
rsc->id, attr, value);
} else {
crm_info("Allowed node %s for %s had best score (%d) "
"of those matching node attribute %s=%s",
best_node, rsc->id, best_score, attr, value);
}
}
return best_score;
}
/*!
* \internal
* \brief Check whether a resource is allowed only on a single node
*
* \param[in] rsc Resource to check
*
* \return \c true if \p rsc is allowed only on one node, otherwise \c false
*/
static bool
allowed_on_one(const pe_resource_t *rsc)
{
GHashTableIter iter;
pe_node_t *allowed_node = NULL;
int allowed_nodes = 0;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &allowed_node)) {
if ((allowed_node->weight >= 0) && (++allowed_nodes > 1)) {
pe_rsc_trace(rsc, "%s is allowed on multiple nodes", rsc->id);
return false;
}
}
pe_rsc_trace(rsc, "%s is allowed %s", rsc->id,
((allowed_nodes == 1)? "on a single node" : "nowhere"));
return (allowed_nodes == 1);
}
/*!
* \internal
* \brief Add resource's colocation matches to current node assignment scores
*
* For each node in a given table, if any of a given resource's allowed nodes
* have a matching value for the colocation attribute, add the highest of those
* nodes' scores to the node's score.
*
* \param[in,out] nodes Table of nodes with assignment scores so far
* \param[in] source_rsc Resource whose node scores to add
* \param[in] target_rsc Resource on whose behalf to update \p nodes
* \param[in] colocation Original colocation constraint (used to get the
* configured primary resource's stickiness and the
* colocation node attribute; must not be NULL for
* this function)
* \param[in] factor Factor by which to multiply scores being added
* \param[in] only_positive Whether to add only positive scores
*/
static void
add_node_scores_matching_attr(GHashTable *nodes,
const pe_resource_t *source_rsc,
const pe_resource_t *target_rsc,
const pcmk__colocation_t *colocation,
float factor, bool only_positive)
{
GHashTableIter iter;
pe_node_t *node = NULL;
const char *attr = colocation->node_attribute;
// Iterate through each node
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
float delta_f = 0;
int delta = 0;
int score = 0;
int new_score = 0;
const char *value = pcmk__colocation_node_attr(node, attr, target_rsc);
score = best_node_score_matching_attr(source_rsc, attr, value);
if ((factor < 0) && (score < 0)) {
/* If the dependent is anti-colocated, we generally don't want the
* primary to prefer nodes that the dependent avoids. That could
* lead to unnecessary shuffling of the primary when the dependent
* hits its migration threshold somewhere, for example.
*
* However, there are cases when it is desirable. If the dependent
* can't run anywhere but where the primary is, it would be
* worthwhile to move the primary for the sake of keeping the
* dependent active.
*
* We can't know that exactly at this point since we don't know
* where the primary will be assigned, but we can limit considering
* the preference to when the dependent is allowed only on one node.
* This is less than ideal for multiple reasons:
*
* - the dependent could be allowed on more than one node but have
* anti-colocation primaries on each;
* - the dependent could be a clone or bundle with multiple
* instances, and the dependent as a whole is allowed on multiple
* nodes but some instance still can't run;
* - the dependent has considered node-specific criteria such as
* location constraints and stickiness by this point, but might
* have other factors that end up disallowing a node
*
* but the alternative is making the primary move when it doesn't
* need to.
*
* We also consider the primary's stickiness and influence, so the
* user has some say in the matter. (This is the configured primary,
* not a particular instance of the primary, but that doesn't matter
* unless stickiness uses a rule to vary by node, and that seems
* acceptable to ignore.)
*/
if ((colocation->primary->stickiness >= -score)
|| !pcmk__colocation_has_influence(colocation, NULL)
|| !allowed_on_one(colocation->dependent)) {
crm_trace("%s: Filtering %d + %f * %d "
"(double negative disallowed)",
pe__node_name(node), node->weight, factor, score);
continue;
}
}
if (node->weight == INFINITY_HACK) {
crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
pe__node_name(node), node->weight, factor, score);
continue;
}
delta_f = factor * score;
// Round the number; see http://c-faq.com/fp/round.html
delta = (int) ((delta_f < 0)? (delta_f - 0.5) : (delta_f + 0.5));
/* Small factors can obliterate the small scores that are often actually
* used in configurations. If the score and factor are nonzero, ensure
* that the result is nonzero as well.
*/
if ((delta == 0) && (score != 0)) {
if (factor > 0.0) {
delta = 1;
} else if (factor < 0.0) {
delta = -1;
}
}
new_score = pcmk__add_scores(delta, node->weight);
if (only_positive && (new_score < 0) && (node->weight > 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d "
"(negative disallowed, marking node unusable)",
pe__node_name(node), node->weight, factor, score,
new_score);
node->weight = INFINITY_HACK;
continue;
}
if (only_positive && (new_score < 0) && (node->weight == 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
pe__node_name(node), node->weight, factor, score,
new_score);
continue;
}
crm_trace("%s: %d + %f * %d = %d", pe__node_name(node),
node->weight, factor, score, new_score);
node->weight = new_score;
}
}
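/* Rounding example with hypothetical values: with factor 0.1 and a matching
* best score of 3, delta_f is 0.3 and would round to 0, so the nonzero guard
* above bumps delta to 1 (or to -1 with factor -0.1). With factor 0.5 and
* score 3, delta_f is 1.5 and rounds to 2. This keeps small user-configured
* scores from being wiped out when a long colocation chain attenuates the
* factor.
*/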
/*!
* \internal
* \brief Update nodes with scores of colocated resources' nodes
*
* Given a table of nodes and a resource, update the nodes' scores with the
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
* \param[in,out] source_rsc Resource whose node scores to add
* \param[in] target_rsc Resource on whose behalf to update \p *nodes
* \param[in] log_id Resource ID for logs (if \c NULL, use
* \p source_rsc ID)
* \param[in,out] nodes Nodes to update (set initial contents to \c NULL
* to copy allowed nodes from \p source_rsc)
* \param[in] colocation Original colocation constraint (used to get
* configured primary resource's stickiness, and
* to get colocation node attribute; if \c NULL,
* <tt>source_rsc</tt>'s own matching node scores
* will not be added, and \p *nodes must be \c NULL
* as well)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pcmk__coloc_select values
*
* \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
* the \c pcmk__coloc_select_this_with flag are used together (and only by
* \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
* \note This is the shared implementation of
* \c resource_alloc_functions_t:add_colocated_node_scores().
*/
void
pcmk__add_colocated_node_scores(pe_resource_t *source_rsc,
const pe_resource_t *target_rsc,
const char *log_id,
GHashTable **nodes,
const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
GHashTable *work = NULL;
CRM_ASSERT((source_rsc != NULL) && (nodes != NULL)
&& ((colocation != NULL)
|| ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
log_id = source_rsc->id;
}
// Avoid infinite recursion
if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
log_id, source_rsc->id);
return;
}
pe__set_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
if (*nodes == NULL) {
work = pcmk__copy_node_table(source_rsc->allowed_nodes);
target_rsc = source_rsc;
} else {
const bool pos = pcmk_is_set(flags, pcmk__coloc_select_nonnegative);
pe_rsc_trace(source_rsc, "%s: Merging %s scores from %s (at %.6f)",
log_id, (pos? "positive" : "all"), source_rsc->id, factor);
work = pcmk__copy_node_table(*nodes);
add_node_scores_matching_attr(work, source_rsc, target_rsc, colocation,
factor, pos);
}
if (work == NULL) {
pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
if (pcmk__any_node_available(work)) {
GList *colocations = NULL;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
colocations = pcmk__this_with_colocations(source_rsc);
pe_rsc_trace(source_rsc,
"Checking additional %d optional '%s with' "
"constraints",
g_list_length(colocations), source_rsc->id);
} else {
colocations = pcmk__with_this_colocations(source_rsc);
pe_rsc_trace(source_rsc,
"Checking additional %d optional 'with %s' "
"constraints",
g_list_length(colocations), source_rsc->id);
}
flags |= pcmk__coloc_select_active;
for (GList *iter = colocations; iter != NULL; iter = iter->next) {
pcmk__colocation_t *constraint = iter->data;
pe_resource_t *other = NULL;
float other_factor = factor * constraint->score / (float) INFINITY;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
other = constraint->primary;
} else if (!pcmk__colocation_has_influence(constraint, NULL)) {
continue;
} else {
other = constraint->dependent;
}
pe_rsc_trace(source_rsc,
"Optionally merging score of '%s' constraint "
"(%s with %s)",
constraint->id, constraint->dependent->id,
constraint->primary->id);
other->cmds->add_colocated_node_scores(other, target_rsc, log_id,
&work, constraint,
other_factor, flags);
pe__show_node_scores(true, NULL, log_id, work, source_rsc->cluster);
}
g_list_free(colocations);
} else if (pcmk_is_set(flags, pcmk__coloc_select_active)) {
pe_rsc_info(source_rsc, "%s: Rolling back optional scores from %s",
log_id, source_rsc->id);
g_hash_table_destroy(work);
pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
if (pcmk_is_set(flags, pcmk__coloc_select_nonnegative)) {
pe_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if (node->weight == INFINITY_HACK) {
node->weight = 1;
}
}
}
if (*nodes != NULL) {
g_hash_table_destroy(*nodes);
}
*nodes = work;
pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
}
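/* Attenuation example with hypothetical scores: when recursing into the
* optional colocations above, each constraint's contribution is scaled by
* other_factor = factor * score / INFINITY. With factor 1.0 and a constraint
* score of 500000 (INFINITY being 1000000), the other resource's node scores
* are merged at strength 0.5; another hop through a 500000 constraint merges
* at 0.25, so influence decays along the chain.
*/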
/*!
* \internal
* \brief Apply a "with this" colocation to a resource's allowed node scores
*
* \param[in,out] data Colocation to apply
* \param[in,out] user_data Resource being assigned
*/
void
pcmk__add_dependent_scores(gpointer data, gpointer user_data)
{
pcmk__colocation_t *colocation = data;
pe_resource_t *target_rsc = user_data;
pe_resource_t *source_rsc = colocation->dependent;
const float factor = colocation->score / (float) INFINITY;
uint32_t flags = pcmk__coloc_select_active;
if (!pcmk__colocation_has_influence(colocation, NULL)) {
return;
}
if (target_rsc->variant == pcmk_rsc_variant_clone) {
flags |= pcmk__coloc_select_nonnegative;
}
pe_rsc_trace(target_rsc,
"%s: Incorporating attenuated %s assignment scores due "
"to colocation %s",
target_rsc->id, source_rsc->id, colocation->id);
source_rsc->cmds->add_colocated_node_scores(source_rsc, target_rsc,
source_rsc->id,
&target_rsc->allowed_nodes,
colocation, factor, flags);
}
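/* Example with a hypothetical score: a "with this" colocation of score
* 250000 yields a factor of 0.25 here, so the dependent's node preferences
* are merged into the primary's allowed node scores at quarter strength.
* When the primary is a clone, pcmk__coloc_select_nonnegative is added so
* that the merged-in scores influence placement without banning nodes
* outright.
*/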
/*!
* \internal
* \brief Exclude nodes from a dependent's node table if not in a given list
*
* Given a dependent resource in a colocation and a list of nodes where the
* primary resource will run, set a node's score to \c -INFINITY in the
* dependent's node table if not found in the primary nodes list.
*
* \param[in,out] dependent Dependent resource
* \param[in] primary Primary resource (for logging only)
* \param[in] colocation Colocation constraint (for logging only)
* \param[in] primary_nodes List of nodes where the primary will have
* unblocked instances in a suitable role
* \param[in] merge_scores If \c true and a node is found in both the
* dependent's allowed nodes table and \p primary_nodes,
* add the node's score in \p primary_nodes to the
* node's score in the dependent's table
*/
void
pcmk__colocation_intersect_nodes(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
const GList *primary_nodes, bool merge_scores)
{
GHashTableIter iter;
pe_node_t *dependent_node = NULL;
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
g_hash_table_iter_init(&iter, dependent->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &dependent_node)) {
const pe_node_t *primary_node = NULL;
primary_node = pe_find_node_id(primary_nodes,
dependent_node->details->id);
if (primary_node == NULL) {
dependent_node->weight = -INFINITY;
pe_rsc_trace(dependent,
"Banning %s from %s (no primary instance) for %s",
dependent->id, pe__node_name(dependent_node),
colocation->id);
} else if (merge_scores) {
dependent_node->weight = pcmk__add_scores(dependent_node->weight,
primary_node->weight);
pe_rsc_trace(dependent,
"Added %s's score %s to %s's score for %s (now %s) "
"for colocation %s",
primary->id, pcmk_readable_score(primary_node->weight),
dependent->id, pe__node_name(dependent_node),
pcmk_readable_score(dependent_node->weight),
colocation->id);
}
}
}
/*!
* \internal
* \brief Get all colocations affecting a resource as the primary
*
* \param[in] rsc Resource to get colocations for
*
* \return Newly allocated list of colocations affecting \p rsc as primary
*
* \note This is a convenience wrapper for the with_this_colocations() method.
*/
GList *
pcmk__with_this_colocations(const pe_resource_t *rsc)
{
GList *list = NULL;
rsc->cmds->with_this_colocations(rsc, rsc, &list);
return list;
}
/*!
* \internal
* \brief Get all colocations affecting a resource as the dependent
*
* \param[in] rsc Resource to get colocations for
*
* \return Newly allocated list of colocations affecting \p rsc as dependent
*
* \note This is a convenience wrapper for the this_with_colocations() method.
*/
GList *
pcmk__this_with_colocations(const pe_resource_t *rsc)
{
GList *list = NULL;
rsc->cmds->this_with_colocations(rsc, rsc, &list);
return list;
}
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index b76eff419b..0dc307d6c5 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -1,948 +1,948 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Assign a group resource to a node
*
* \param[in,out] rsc Group resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and a child of \p rsc can't be
* assigned to a node, set the child's next role to
* stopped and update existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pe_node_t *
pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail)
{
pe_node_t *first_assigned_node = NULL;
pe_resource_t *first_member = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return rsc->allocated_to; // Assignment already done
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "Assignment dependency loop detected involving %s",
rsc->id);
return NULL;
}
if (rsc->children == NULL) {
// No members to assign
pe__clear_resource_flags(rsc, pcmk_rsc_unassigned);
return NULL;
}
pe__set_resource_flags(rsc, pcmk_rsc_assigning);
first_member = (pe_resource_t *) rsc->children->data;
rsc->role = first_member->role;
pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
pcmk_sched_output_scores),
rsc, __func__, rsc->allowed_nodes, rsc->cluster);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *member = (pe_resource_t *) iter->data;
pe_node_t *node = NULL;
pe_rsc_trace(rsc, "Assigning group %s member %s",
rsc->id, member->id);
node = member->cmds->assign(member, prefer, stop_if_fail);
if (first_assigned_node == NULL) {
first_assigned_node = node;
}
}
pe__set_next_role(rsc, first_member->next_role, "first group member");
pe__clear_resource_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned);
if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
return NULL;
}
return first_assigned_node;
}
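/* For example, for a colocated group with members g1, g2, and g3, the node
* returned is the one chosen for g1; the later members follow it via the
* implicit "#group-members" colocations created in
* member_internal_constraints(). For a non-colocated group, members may be
* assigned to different nodes, so NULL is returned.
*/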
/*!
* \internal
* \brief Create a pseudo-operation for a group as an ordering point
*
* \param[in,out] group Group resource to create action for
* \param[in] action Action name
*
* \return Newly created pseudo-operation
*/
static pe_action_t *
create_group_pseudo_op(pe_resource_t *group, const char *action)
{
pe_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
action, NULL, TRUE, TRUE, group->cluster);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
return op;
}
/*!
* \internal
* \brief Create all actions needed for a given group resource
*
* \param[in,out] rsc Group resource to create actions for
*/
void
pcmk__group_create_actions(pe_resource_t *rsc)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
pe_rsc_trace(rsc, "Creating actions for group %s", rsc->id);
// Create actions for individual group members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *member = (pe_resource_t *) iter->data;
member->cmds->create_actions(member);
}
// Create pseudo-actions for group itself to serve as ordering points
create_group_pseudo_op(rsc, PCMK_ACTION_START);
create_group_pseudo_op(rsc, PCMK_ACTION_RUNNING);
create_group_pseudo_op(rsc, PCMK_ACTION_STOP);
create_group_pseudo_op(rsc, PCMK_ACTION_STOPPED);
if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE))) {
create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTE);
create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTED);
create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTE);
create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTED);
}
}
// User data for member_internal_constraints()
struct member_data {
// These could be derived from member but this avoids some function calls
bool ordered;
bool colocated;
bool promotable;
pe_resource_t *last_active;
pe_resource_t *previous_member;
};
/*!
* \internal
* \brief Create implicit constraints needed for a group member
*
* \param[in,out] data Group member to create implicit constraints for
* \param[in,out] user_data Member data (struct member_data *)
*/
static void
member_internal_constraints(gpointer data, gpointer user_data)
{
pe_resource_t *member = (pe_resource_t *) data;
struct member_data *member_data = (struct member_data *) user_data;
// For ordering demote vs demote or stop vs stop
uint32_t down_flags = pe_order_implies_first_printed;
// For ordering demote vs demoted or stop vs stopped
uint32_t post_down_flags = pe_order_implies_then_printed;
// Create the individual member's implicit constraints
member->cmds->internal_constraints(member);
if (member_data->previous_member == NULL) {
// This is first member
if (member_data->ordered) {
pe__set_order_flags(down_flags, pe_order_optional);
post_down_flags = pe_order_implies_then;
}
} else if (member_data->colocated) {
uint32_t flags = pcmk__coloc_none;
- if (pcmk_is_set(member->flags, pe_rsc_critical)) {
+ if (pcmk_is_set(member->flags, pcmk_rsc_critical)) {
flags |= pcmk__coloc_influence;
}
// Colocate this member with the previous one
pcmk__new_colocation("#group-members", NULL, INFINITY, member,
member_data->previous_member, NULL, NULL, flags);
}
if (member_data->promotable) {
// Demote group -> demote member -> group is demoted
pcmk__order_resource_actions(member->parent, PCMK_ACTION_DEMOTE,
member, PCMK_ACTION_DEMOTE, down_flags);
pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
member->parent, PCMK_ACTION_DEMOTED,
post_down_flags);
// Promote group -> promote member -> group is promoted
pcmk__order_resource_actions(member, PCMK_ACTION_PROMOTE,
member->parent, PCMK_ACTION_PROMOTED,
pe_order_runnable_left
|pe_order_implies_then
|pe_order_implies_then_printed);
pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
member, PCMK_ACTION_PROMOTE,
pe_order_implies_first_printed);
}
// Stop group -> stop member -> group is stopped
pcmk__order_stops(member->parent, member, down_flags);
pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
member->parent, PCMK_ACTION_STOPPED,
post_down_flags);
// Start group -> start member -> group is started
pcmk__order_starts(member->parent, member, pe_order_implies_first_printed);
pcmk__order_resource_actions(member, PCMK_ACTION_START,
member->parent, PCMK_ACTION_RUNNING,
pe_order_runnable_left
|pe_order_implies_then
|pe_order_implies_then_printed);
if (!member_data->ordered) {
pcmk__order_starts(member->parent, member,
pe_order_implies_then
|pe_order_runnable_left
|pe_order_implies_first_printed);
if (member_data->promotable) {
pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
member, PCMK_ACTION_PROMOTE,
pe_order_implies_then
|pe_order_runnable_left
|pe_order_implies_first_printed);
}
} else if (member_data->previous_member == NULL) {
pcmk__order_starts(member->parent, member, pe_order_none);
if (member_data->promotable) {
pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
member, PCMK_ACTION_PROMOTE,
pe_order_none);
}
} else {
// Order this member relative to the previous one
pcmk__order_starts(member_data->previous_member, member,
pe_order_implies_then|pe_order_runnable_left);
pcmk__order_stops(member, member_data->previous_member,
pe_order_optional|pe_order_restart);
/* In unusual circumstances (such as adding a new member to the middle
* of a group with unmanaged later members), this member may be active
* while the previous (new) member is inactive. In this situation, the
* usual restart orderings will be irrelevant, so we need to order this
* member's stop before the previous member's start.
*/
if ((member->running_on != NULL)
&& (member_data->previous_member->running_on == NULL)) {
pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
member_data->previous_member,
PCMK_ACTION_START,
pe_order_implies_first
|pe_order_runnable_left);
}
if (member_data->promotable) {
pcmk__order_resource_actions(member_data->previous_member,
PCMK_ACTION_PROMOTE, member,
PCMK_ACTION_PROMOTE,
pe_order_implies_then
|pe_order_runnable_left);
pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
member_data->previous_member,
PCMK_ACTION_DEMOTE,
pe_order_optional);
}
}
// Make sure partially active groups shut down in sequence
if (member->running_on != NULL) {
if (member_data->ordered && (member_data->previous_member != NULL)
&& (member_data->previous_member->running_on == NULL)
&& (member_data->last_active != NULL)
&& (member_data->last_active->running_on != NULL)) {
pcmk__order_stops(member, member_data->last_active,
pe_order_optional);
}
member_data->last_active = member;
}
member_data->previous_member = member;
}
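/* Ordering sketch for a hypothetical ordered, colocated group with members
* g1 -> g2 -> g3: the constraints created here chain roughly as
* start group -> start g1 -> start g2 -> start g3 -> group running, and
* stop group -> stop g3 -> stop g2 -> stop g1 -> group stopped, with each
* later member also colocated (INFINITY) with the previous one via the
* "#group-members" colocation.
*/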
/*!
* \internal
* \brief Create implicit constraints needed for a group resource
*
* \param[in,out] rsc Group resource to create implicit constraints for
*/
void
pcmk__group_internal_constraints(pe_resource_t *rsc)
{
struct member_data member_data = { false, };
const pe_resource_t *top = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
/* Order group pseudo-actions relative to each other for restarting:
* stop group -> group is stopped -> start group -> group is started
*/
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
rsc, PCMK_ACTION_STOPPED,
pe_order_runnable_left);
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
rsc, PCMK_ACTION_START,
pe_order_optional);
pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
rsc, PCMK_ACTION_RUNNING,
pe_order_runnable_left);
top = pe__const_top_resource(rsc, false);
member_data.ordered = pe__group_flag_is_set(rsc, pe__group_ordered);
member_data.colocated = pe__group_flag_is_set(rsc, pe__group_colocated);
member_data.promotable = pcmk_is_set(top->flags, pcmk_rsc_promotable);
g_list_foreach(rsc->children, member_internal_constraints, &member_data);
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for a group with some other resource, apply the
* score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent group resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
*/
static void
colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
const pcmk__colocation_t *colocation)
{
pe_resource_t *member = NULL;
if (dependent->children == NULL) {
return;
}
pe_rsc_trace(primary, "Processing %s (group %s with %s) for dependent",
colocation->id, dependent->id, primary->id);
if (pe__group_flag_is_set(dependent, pe__group_colocated)) {
// Colocate first member (internal colocations will handle the rest)
member = (pe_resource_t *) dependent->children->data;
member->cmds->apply_coloc_score(member, primary, colocation, true);
return;
}
if (colocation->score >= INFINITY) {
pcmk__config_err("%s: Cannot perform mandatory colocation between "
"non-colocated group and %s",
dependent->id, primary->id);
return;
}
// Colocate each member individually
for (GList *iter = dependent->children; iter != NULL; iter = iter->next) {
member = (pe_resource_t *) iter->data;
member->cmds->apply_coloc_score(member, primary, colocation, true);
}
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for some other resource with a group, apply the
* score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary group resource in colocation
* \param[in] colocation Colocation constraint to apply
*/
static void
colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const pe_resource_t *member = NULL;
pe_rsc_trace(primary,
"Processing colocation %s (%s with group %s) for primary",
colocation->id, dependent->id, primary->id);
if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
return;
}
if (pe__group_flag_is_set(primary, pe__group_colocated)) {
if (colocation->score >= INFINITY) {
/* For mandatory colocations, the entire group must be assignable
* (and in the specified role if any), so apply the colocation based
* on the last member.
*/
member = pe__last_group_member(primary);
} else if (primary->children != NULL) {
/* For optional colocations, whether the group is partially or fully
* up doesn't matter, so apply the colocation based on the first
* member.
*/
member = (pe_resource_t *) primary->children->data;
}
if (member == NULL) {
return; // Nothing to colocate with
}
member->cmds->apply_coloc_score(dependent, member, colocation, false);
return;
}
if (colocation->score >= INFINITY) {
pcmk__config_err("%s: Cannot perform mandatory colocation with"
" non-colocated group %s",
dependent->id, primary->id);
return;
}
// Colocate dependent with each member individually
for (const GList *iter = primary->children; iter != NULL;
iter = iter->next) {
member = iter->data;
member->cmds->apply_coloc_score(dependent, member, colocation, false);
}
}
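/* For example, with a colocated group G of members g1, g2, and g3, a
* mandatory "X with G" colocation is applied against g3 (the last member),
* since the whole group must be assignable for it to be satisfied, while an
* optional "X with G" is applied against g1 so that X can still follow a
* partially active group.
*/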
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__group_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
if (for_dependent) {
colocate_group_with(dependent, primary, colocation);
} else {
// Method should only be called for primitive dependents
CRM_ASSERT(dependent->variant == pcmk_rsc_variant_primitive);
colocate_with_group(dependent, primary, colocation);
}
}
/*!
* \internal
* \brief Return action flags for a given group resource action
*
* \param[in,out] action Group action to get flags for
* \param[in] node If not NULL, limit effects to this node
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node)
{
// Default flags for a group action
uint32_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
CRM_ASSERT(action != NULL);
// Update flags considering each member's own flags for same action
for (GList *iter = action->rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *member = (pe_resource_t *) iter->data;
// Check whether member has the same action
enum action_tasks task = get_complex_task(member, action->task);
const char *task_s = task2text(task);
pe_action_t *member_action = find_first_action(member->actions, NULL,
task_s, node);
if (member_action != NULL) {
uint32_t member_flags = member->cmds->action_flags(member_action,
node);
// Group action is mandatory if any member action is
if (pcmk_is_set(flags, pe_action_optional)
&& !pcmk_is_set(member_flags, pe_action_optional)) {
pe_rsc_trace(action->rsc, "%s is mandatory because %s is",
action->uuid, member_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
pe_action_optional);
pe__clear_action_flags(action, pe_action_optional);
}
// Group action is unrunnable if any member action is
if (!pcmk__str_eq(task_s, action->task, pcmk__str_none)
&& pcmk_is_set(flags, pe_action_runnable)
&& !pcmk_is_set(member_flags, pe_action_runnable)) {
pe_rsc_trace(action->rsc, "%s is unrunnable because %s is",
action->uuid, member_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
pe_action_runnable);
pe__clear_action_flags(action, pe_action_runnable);
}
/* Group (pseudo-)actions other than stop or demote are unrunnable
* unless every member will do it.
*/
} else if ((task != pcmk_action_stop) && (task != pcmk_action_demote)) {
pe_rsc_trace(action->rsc,
"%s is not runnable because %s will not %s",
action->uuid, member->id, task_s);
pe__clear_raw_action_flags(flags, "group action",
pe_action_runnable);
}
}
return flags;
}
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions'
* flags (and runnable_before members if appropriate) to reflect the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pe_action_optional to affect only mandatory
* actions, and pe_action_runnable to affect only
* runnable actions)
* \param[in] type Group of enum pe_ordering flags to apply
* \param[in,out] data_set Cluster working set
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then,
const pe_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pe_working_set_t *data_set)
{
uint32_t changed = pcmk__updated_none;
// Group method can be called only on behalf of "then" action
CRM_ASSERT((first != NULL) && (then != NULL) && (then->rsc != NULL)
&& (data_set != NULL));
// Update the actions for the group itself
changed |= pcmk__update_ordered_actions(first, then, node, flags, filter,
type, data_set);
// Update the actions for each group member
for (GList *iter = then->rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *member = (pe_resource_t *) iter->data;
pe_action_t *member_action = find_first_action(member->actions, NULL,
then->task, node);
if (member_action != NULL) {
changed |= member->cmds->update_ordered_actions(first,
member_action, node,
flags, filter, type,
data_set);
}
}
return changed;
}
/*!
* \internal
* \brief Apply a location constraint to a group's allowed node scores
*
* \param[in,out] rsc Group resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
void
pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location)
{
GList *node_list_orig = NULL;
GList *node_list_copy = NULL;
bool reset_scores = true;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
&& (location != NULL));
node_list_orig = location->node_list_rh;
node_list_copy = pcmk__copy_node_list(node_list_orig, true);
reset_scores = pe__group_flag_is_set(rsc, pe__group_colocated);
// Apply the constraint for the group itself (updates node scores)
pcmk__apply_location(rsc, location);
// Apply the constraint for each member
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *member = (pe_resource_t *) iter->data;
member->cmds->apply_location(member, location);
if (reset_scores) {
/* The first member of colocated groups needs to use the original
* node scores, but subsequent members should work on a copy, since
* the first member's scores already incorporate theirs.
*/
reset_scores = false;
location->node_list_rh = node_list_copy;
}
}
location->node_list_rh = node_list_orig;
g_list_free_full(node_list_copy, free);
}
// Group implementation of resource_alloc_functions_t:colocated_resources()
GList *
pcmk__group_colocated_resources(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc,
GList *colocated_rscs)
{
const pe_resource_t *member = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
if (pe__group_flag_is_set(rsc, pe__group_colocated)
|| pe_rsc_is_clone(rsc->parent)) {
/* This group has colocated members and/or is cloned -- either way,
* add every child's colocated resources to the list. The first and last
* members will include the group's own colocations.
*/
colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc);
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
member = (const pe_resource_t *) iter->data;
colocated_rscs = member->cmds->colocated_resources(member, orig_rsc,
colocated_rscs);
}
} else if (rsc->children != NULL) {
/* This group's members are not colocated, and the group is not cloned,
* so just add the group's own colocations to the list.
*/
colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc,
colocated_rscs);
}
return colocated_rscs;
}
// Group implementation of resource_alloc_functions_t:with_this_colocations()
void
pcmk__with_group_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
&& (orig_rsc != NULL) && (list != NULL));
// Ignore empty groups
if (rsc->children == NULL) {
return;
}
/* "With this" colocations are needed only for the group itself and for its
* last member. (Previous members will chain via the group internal
* colocations.)
*/
if ((orig_rsc != rsc) && (orig_rsc != pe__last_group_member(rsc))) {
return;
}
pe_rsc_trace(rsc, "Adding 'with %s' colocations to list for %s",
rsc->id, orig_rsc->id);
// Add the group's own colocations
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
// If cloned, add any relevant colocations with the clone
if (rsc->parent != NULL) {
rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc,
list);
}
if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
// @COMPAT Non-colocated groups are deprecated
return;
}
// Add explicit colocations with the group's (other) children
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
const pe_resource_t *member = iter->data;
if (member != orig_rsc) {
member->cmds->with_this_colocations(member, orig_rsc, list);
}
}
}
// Group implementation of resource_alloc_functions_t:this_with_colocations()
void
pcmk__group_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
const pe_resource_t *member = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
&& (orig_rsc != NULL) && (list != NULL));
// Ignore empty groups
if (rsc->children == NULL) {
return;
}
/* "This with" colocations are normally needed only for the group itself and
* for its first member.
*/
if ((rsc == orig_rsc)
|| (orig_rsc == (const pe_resource_t *) rsc->children->data)) {
pe_rsc_trace(rsc, "Adding '%s with' colocations to list for %s",
rsc->id, orig_rsc->id);
// Add the group's own colocations
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
// If cloned, add any relevant colocations involving the clone
if (rsc->parent != NULL) {
rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc,
list);
}
if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
// @COMPAT Non-colocated groups are deprecated
return;
}
// Add explicit colocations involving the group's (other) children
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
member = iter->data;
if (member != orig_rsc) {
member->cmds->this_with_colocations(member, orig_rsc, list);
}
}
return;
}
/* Later group members honor the group's colocations indirectly, due to the
* internal group colocations that chain everything from the first member.
* However, if an earlier group member is unmanaged, this chaining will not
* happen, so the group's mandatory colocations must be explicitly added.
*/
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
member = iter->data;
if (orig_rsc == member) {
break; // We've seen all earlier members, and none are unmanaged
}
if (!pcmk_is_set(member->flags, pcmk_rsc_managed)) {
crm_trace("Adding mandatory '%s with' colocations to list for "
"member %s because earlier member %s is unmanaged",
rsc->id, orig_rsc->id, member->id);
for (const GList *cons_iter = rsc->rsc_cons; cons_iter != NULL;
cons_iter = cons_iter->next) {
const pcmk__colocation_t *colocation = NULL;
colocation = (const pcmk__colocation_t *) cons_iter->data;
if (colocation->score == INFINITY) {
pcmk__add_this_with(list, colocation, orig_rsc);
}
}
// @TODO Add mandatory (or all?) clone constraints if cloned
break;
}
}
}
/*!
* \internal
* \brief Update nodes with scores of colocated resources' nodes
*
* Given a table of nodes and a resource, update the nodes' scores with the
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
* \param[in,out] source_rsc Group resource whose node scores to add
* \param[in] target_rsc Resource on whose behalf to update \p *nodes
* \param[in] log_id Resource ID for logs (if \c NULL, use
* \p source_rsc ID)
* \param[in,out] nodes Nodes to update (set initial contents to \c NULL
* to copy allowed nodes from \p source_rsc)
* \param[in] colocation Original colocation constraint (used to get
* configured primary resource's stickiness, and
* to get colocation node attribute; if \c NULL,
* <tt>source_rsc</tt>'s own matching node scores will
* not be added, and \p *nodes must be \c NULL as
* well)
* \param[in] factor Incorporate scores multiplied by this factor
* \param[in] flags Bitmask of enum pcmk__coloc_select values
*
* \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
* the \c pcmk__coloc_select_this_with flag are used together (and only by
* \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
* \note This is the group implementation of
* \c resource_alloc_functions_t:add_colocated_node_scores().
*/
void
pcmk__group_add_colocated_node_scores(pe_resource_t *source_rsc,
const pe_resource_t *target_rsc,
const char *log_id, GHashTable **nodes,
const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
pe_resource_t *member = NULL;
CRM_ASSERT((source_rsc != NULL)
&& (source_rsc->variant == pcmk_rsc_variant_group)
&& (nodes != NULL)
&& ((colocation != NULL)
|| ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
log_id = source_rsc->id;
}
// Avoid infinite recursion
if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
log_id, source_rsc->id);
return;
}
pe__set_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
// Ignore empty groups (only possible with schema validation disabled)
if (source_rsc->children == NULL) {
return;
}
/* Refer the operation to the first or last member as appropriate.
*
* cmp_resources() is the only caller that passes a NULL nodes table,
* and is also the only caller using pcmk__coloc_select_this_with.
* For "this with" colocations, the last member will recursively incorporate
* all the other members' "this with" colocations via the internal group
* colocations (and via the first member, the group's own colocations).
*
* For "with this" colocations, the first member works similarly.
*/
if (*nodes == NULL) {
member = pe__last_group_member(source_rsc);
} else {
member = source_rsc->children->data;
}
pe_rsc_trace(source_rsc, "%s: Merging scores from group %s using member %s "
"(at %.6f)", log_id, source_rsc->id, member->id, factor);
member->cmds->add_colocated_node_scores(member, target_rsc, log_id, nodes,
colocation, factor, flags);
pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
}
// Group implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__group_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
pe_resource_t *member = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
&& (orig_rsc != NULL) && (utilization != NULL));
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
orig_rsc->id, rsc->id);
if (pe__group_flag_is_set(rsc, pe__group_colocated)
|| pe_rsc_is_clone(rsc->parent)) {
// Every group member will be on same node, so sum all members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
member = (pe_resource_t *) iter->data;
if (pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->cmds->add_utilization(member, orig_rsc, all_rscs,
utilization);
}
}
} else if (rsc->children != NULL) {
// Just add first member's utilization
member = (pe_resource_t *) rsc->children->data;
if ((member != NULL)
&& pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->cmds->add_utilization(member, orig_rsc, all_rscs,
utilization);
}
}
}
// Group implementation of resource_alloc_functions_t:shutdown_lock()
void
pcmk__group_shutdown_lock(pe_resource_t *rsc)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
pe_resource_t *member = (pe_resource_t *) iter->data;
member->cmds->shutdown_lock(member);
}
}
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 455b7bc8fd..025cbe4816 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1185 +1,1185 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/scheduler_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean);
resource_object_functions_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
native_parameter,
native_print,
native_active,
native_resource_state,
native_location,
native_free,
pe__count_common,
pe__native_is_filtered,
active_node,
pe__primitive_max_per_node,
},
{
group_unpack,
native_find_rsc,
native_parameter,
group_print,
group_active,
group_resource_state,
native_location,
group_free,
pe__count_common,
pe__group_is_filtered,
active_node,
pe__group_max_per_node,
},
{
clone_unpack,
native_find_rsc,
native_parameter,
clone_print,
clone_active,
clone_resource_state,
native_location,
clone_free,
pe__count_common,
pe__clone_is_filtered,
active_node,
pe__clone_max_per_node,
},
{
pe__unpack_bundle,
native_find_rsc,
native_parameter,
pe__print_bundle,
pe__bundle_active,
pe__bundle_resource_state,
native_location,
pe__free_bundle,
pe__count_bundle,
pe__bundle_is_filtered,
pe__bundle_active_node,
pe__bundle_max_per_node,
}
};
static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
return pcmk_rsc_variant_primitive;
} else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
return pcmk_rsc_variant_group;
} else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
return pcmk_rsc_variant_bundle;
}
return pcmk_rsc_variant_unknown;
}
static void
dup_attr(gpointer key, gpointer value, gpointer user_data)
{
add_hash_param(user_data, key, value);
}
static void
expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
pe_resource_t *p = rsc->parent;
if (p == NULL) {
return ;
}
/* Search all ancestor resources and collect the "meta_attributes" values set directly in their original XML into the hash table. */
/* Values from closer ancestors take precedence and are not overwritten. */
while(p != NULL) {
/* A hash table for comparison is generated, including the id-ref. */
pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
rule_data, parent_orig_meta, NULL, FALSE, data_set);
p = p->parent;
}
/* If any ancestor set "meta_attributes" directly in its XML, apply those values to the child's hash table. */
if (parent_orig_meta != NULL) {
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, parent_orig_meta);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
/* Parameters set directly in an ancestor's original XML are also applied to the child resource. */
/* Attributes that are already set on the child resource are not updated. */
dup_attr(key, value, meta_hash);
}
}
if (parent_orig_meta != NULL) {
g_hash_table_destroy(parent_orig_meta);
}
return ;
}
void
get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
const char *prop_name = (const char *) a->name;
const char *prop_value = pcmk__xml_attr_value(a);
add_hash_param(meta_hash, prop_name, prop_value);
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* Add "meta_attributes" explicitly set on ancestor resources to the child resource's hash table. */
/* Values already explicitly set on the child are not overwritten. */
if (rsc->parent != NULL) {
expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
}
/* check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
/* Finally, inherit any remaining "meta_attributes" from the parent's fully evaluated meta table. */
/* Values already set up to this point (including from rsc_defaults) are not overwritten. */
if (rsc->parent) {
g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
}
}
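/* In other words, a meta-attribute value is taken from the first of these
* sources that defines it: attributes on the resource's own XML element, the
* resource's own meta_attributes blocks, meta_attributes set directly in
* ancestor resources' XML (closest ancestor first), rsc_defaults, and
* finally the parent's fully evaluated meta table.
*/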
void
get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
const pe_node_t *node, pe_working_set_t *data_set)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
}
}
static char *
template_op_key(xmlNode * op)
{
const char *name = crm_element_value(op, "name");
const char *role = crm_element_value(op, "role");
char *key = NULL;
if ((role == NULL)
|| pcmk__strcase_any_of(role, PCMK__ROLE_STARTED, PCMK__ROLE_UNPROMOTED,
PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
role = PCMK__ROLE_UNKNOWN;
}
key = crm_strdup_printf("%s-%s", name, role);
return key;
}
static gboolean
unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
xmlNode *new_xml = NULL;
xmlNode *child_xml = NULL;
xmlNode *rsc_ops = NULL;
xmlNode *template_ops = NULL;
const char *template_ref = NULL;
const char *clone = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for template unpacking");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
}
template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
XML_ATTR_ID, template_ref);
if (template == NULL) {
pe_err("No template named '%s'", template_ref);
return FALSE;
}
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
crm_xml_add(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
}
template_ops = find_xml_node(new_xml, "operations", FALSE);
for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
child_xml = pcmk__xe_next(child_xml)) {
xmlNode *new_child = NULL;
new_child = add_node_copy(new_xml, child_xml);
if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
rsc_ops = new_child;
}
}
if (template_ops && rsc_ops) {
xmlNode *op = NULL;
GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
g_hash_table_insert(rsc_ops_hash, key, op);
}
for (op = pcmk__xe_first_child(template_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
add_node_copy(rsc_ops, op);
}
free(key);
}
if (rsc_ops_hash) {
g_hash_table_destroy(rsc_ops_hash);
}
free_xml(template_ops);
}
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
/* Disable multi-level templates for now */
/*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return FALSE;
} */
return TRUE;
}
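/* Hypothetical configuration sketch of what unpack_template() expands; the
 * ids and agent names are illustrative only:
 *
 *   <template id="web-template" class="ocf" provider="heartbeat" type="apache">
 *     <operations>
 *       <op id="web-template-monitor" name="monitor" interval="30s"/>
 *       <op id="web-template-start" name="start" timeout="60s"/>
 *     </operations>
 *   </template>
 *
 *   <primitive id="web1" template="web-template">
 *     <operations>
 *       <op id="web1-monitor" name="monitor" interval="10s"/>
 *     </operations>
 *   </primitive>
 *
 * The expanded XML for web1 keeps its own 10-second monitor and inherits the
 * template's start operation, because a template operation is copied only
 * when the resource has no operation with the same name/role key.
 */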
static gboolean
add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
{
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for processing resource list of template");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
return TRUE;
}
static bool
detect_promotable(pe_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
if (crm_is_true(promotable)) {
return TRUE;
}
// @COMPAT deprecated since 2.0.0
if (pcmk__xe_is(rsc->xml, PCMK_XE_PROMOTABLE_LEGACY)) {
/* @TODO In some future version, call pe_warn_once() here,
* then drop support in an even later version
*/
g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
strdup(XML_BOOLEAN_TRUE));
return TRUE;
}
return FALSE;
}
static void
free_params_table(gpointer data)
{
g_hash_table_destroy((GHashTable *) data);
}
/*!
* \brief Get a table of resource parameters
*
* \param[in,out] rsc Resource to query
* \param[in] node Node for evaluating rules (NULL for defaults)
* \param[in,out] data_set Cluster working set
*
* \return Hash table containing resource parameter names and values
* (or NULL if \p rsc or \p data_set is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
pe_working_set_t *data_set)
{
GHashTable *params_on_node = NULL;
/* A NULL node is used to request the resource's default parameters
* (not evaluated for node), but we always want something non-NULL
* as a hash table key.
*/
const char *node_name = "";
// Sanity check
if ((rsc == NULL) || (data_set == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
node_name = node->details->uname;
}
// Find the parameter table for given node
if (rsc->parameter_cache == NULL) {
rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
} else {
params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
}
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
get_rsc_attributes(params_on_node, rsc, node, data_set);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
return params_on_node;
}
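/* A minimal caller-side sketch (illustrative only): look up one parameter as
 * evaluated for a particular node; the parameter name "ip" is hypothetical.
 *
 *     GHashTable *params = pe_rsc_params(rsc, node, data_set);
 *     const char *ip = g_hash_table_lookup(params, "ip");
 *
 * The table is cached on the resource and freed along with it, so the caller
 * must not destroy it (see the function documentation above).
 */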
/*!
* \internal
* \brief Unpack a resource's "requires" meta-attribute
*
* \param[in,out] rsc Resource being unpacked
* \param[in] value Value of "requires" meta-attribute
* \param[in] is_default Whether \p value was selected by default
*/
static void
unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
{
if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else if (!pcmk_is_set(rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"is disabled", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else {
pe__set_resource_flags(rsc,
pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
value = PCMK__VALUE_QUORUM;
} else if ((rsc->variant == pcmk_rsc_variant_primitive)
&& xml_contains_remote_node(rsc->xml)) {
value = PCMK__VALUE_QUORUM;
} else if (pcmk_is_set(rsc->cluster->flags,
pcmk_sched_enable_unfencing)) {
value = PCMK__VALUE_UNFENCING;
} else if (pcmk_is_set(rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
value = PCMK__VALUE_FENCING;
} else if (rsc->cluster->no_quorum_policy == pcmk_no_quorum_ignore) {
value = PCMK__VALUE_NOTHING;
} else {
value = PCMK__VALUE_QUORUM;
}
if (orig_value != NULL) {
pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
"to '%s' because '%s' is not valid",
rsc->id, value, orig_value);
}
unpack_requires(rsc, value, true);
return;
}
pe_rsc_trace(rsc, "\tRequired to start: %s%s", value,
(is_default? " (default)" : ""));
}
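/* Summary of the default selection implemented above when "requires" is unset
 * or unrecognized (evaluated in this order):
 *
 *   fencing device                      -> "quorum"
 *   Pacemaker Remote connection         -> "quorum"
 *   unfencing enabled cluster-wide      -> "unfencing"
 *   fencing enabled                     -> "fencing"
 *   no-quorum-policy=ignore             -> "nothing"
 *   otherwise                           -> "quorum"
 */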
#ifndef PCMK__COMPAT_2_0
static void
warn_about_deprecated_classes(pe_resource_t *rsc)
{
const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
pe_warn_once(pe_wo_upstart,
"Support for Upstart resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
} else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
pe_warn_once(pe_wo_nagios,
"Support for Nagios resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
}
}
#endif
/*!
* \internal
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
* \c pe_resource_t object.
*
* \param[in] xml_obj XML node containing the resource's configuration
* \param[out] rsc Where to store the unpacked resource information
* \param[in] parent Resource's parent, if any
* \param[in,out] data_set Cluster working set
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
* the caller is responsible for freeing it using its variant-specific
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_resource_t *parent, pe_working_set_t *data_set)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
const char *value = NULL;
const char *id = NULL;
bool guest_node = false;
bool remote_node = false;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
CRM_CHECK(rsc != NULL, return EINVAL);
CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
*rsc = NULL;
return EINVAL);
rule_data.now = data_set->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
xml_obj->name);
return pcmk_rc_unpack_error;
}
if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
return pcmk_rc_unpack_error;
}
*rsc = calloc(1, sizeof(pe_resource_t));
if (*rsc == NULL) {
crm_crit("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
(*rsc)->cluster = data_set;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
(*rsc)->xml = expanded_xml;
(*rsc)->orig_xml = xml_obj;
} else {
(*rsc)->xml = xml_obj;
(*rsc)->orig_xml = NULL;
}
/* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
(*rsc)->ops_xml = expand_idref(ops, data_set->input);
(*rsc)->variant = get_resource_type((const char *) (*rsc)->xml->name);
if ((*rsc)->variant == pcmk_rsc_variant_unknown) {
pe_err("Ignoring resource '%s' of unknown type '%s'",
id, (*rsc)->xml->name);
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
#ifndef PCMK__COMPAT_2_0
warn_about_deprecated_classes(*rsc);
#endif
(*rsc)->meta = pcmk__strkey_table(free, free);
(*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
(*rsc)->known_on = pcmk__strkey_table(NULL, free);
value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
if (value) {
(*rsc)->id = crm_strdup_printf("%s:%s", id, value);
add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
} else {
(*rsc)->id = strdup(id);
}
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
(*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
(*rsc)->flags = 0;
pe__set_resource_flags(*rsc, pe_rsc_runnable|pcmk_rsc_unassigned);
if (!pcmk_is_set(data_set->flags, pcmk_sched_in_maintenance)) {
pe__set_resource_flags(*rsc, pcmk_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
(*rsc)->role = pcmk_role_stopped;
(*rsc)->next_role = pcmk_role_unknown;
(*rsc)->recovery_type = pcmk_multiply_active_restart;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
(*rsc)->priority = char2score(value);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_critical);
+ pe__set_resource_flags(*rsc, pcmk_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pcmk_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
(*rsc)->is_remote_node = TRUE;
if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
guest_node = true;
} else {
remote_node = true;
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
} else if ((value == NULL) && remote_node) {
/* By default, we want remote nodes to be able to float around the
* cluster without having to stop every resource hosted on the remote
* node before moving. Enabling migration support makes this possible.
* If it ever causes problems, migration support can be explicitly
* turned off with allow-migrate=false.
*/
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pcmk_rsc_managed);
} else {
pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pcmk_is_set(data_set->flags, pcmk_sched_in_maintenance)) {
pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
if (detect_promotable(*rsc)) {
pe__set_resource_flags(*rsc, pcmk_rsc_promotable);
}
} else {
pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
(*rsc)->id);
pe_warn_once(pe_wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
(*rsc)->restart_type = pe_restart_ignore;
pe_rsc_trace((*rsc), "%s dependency restart handling: ignore",
(*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_stop;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_block;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_unexpected;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
} else { // "stop_start"
if (!pcmk__str_eq(value, "stop_start",
pcmk__str_casei|pcmk__str_null_matches)) {
pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
", using default of \"stop_start\"", value);
}
(*rsc)->recovery_type = pcmk_multiply_active_restart;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop/start", (*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->stickiness = char2score(value);
}
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MIGRATION_THRESHOLD);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
/* @TODO We use 1 here to preserve previous behavior, but this
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
pe_warn_once(pe_wo_neg_threshold,
PCMK_META_MIGRATION_THRESHOLD
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
}
if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pe__set_working_set_flags(data_set, pcmk_sched_have_fencing);
pe__set_resource_flags(*rsc, pcmk_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
unpack_requires(*rsc, value, false);
value = g_hash_table_lookup((*rsc)->meta, PCMK_META_FAILURE_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
* reconnect_interval.
*
* @TODO Evaluate per node before using
*/
value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
if (value) {
/* The reconnect delay works by setting failure_timeout and preventing
* the connection from starting until the failure is cleared. */
(*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
/* Override any default failure_timeout when a remote
* reconnect_interval is in use. */
(*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
}
}
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
(*rsc)->next_role != pcmk_role_unknown? role2text((*rsc)->next_role) : "default");
if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
if (pcmk_is_set(data_set->flags, pcmk_sched_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
} else if (guest_node) {
/* Remote connection resources tied to a container must always be allowed
* to opt in to the cluster. Whether the connection resource can actually
* be placed on a node depends on the container resource. */
resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
}
pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
pcmk_is_set((*rsc)->flags, pcmk_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
(*rsc)->utilization, NULL, FALSE, data_set);
if (expanded_xml) {
if (add_template_rsc(xml_obj, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
}
return pcmk_rc_ok;
}
gboolean
is_parent(pe_resource_t *child, pe_resource_t *rsc)
{
pe_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
}
while (parent->parent != NULL) {
if (parent->parent == rsc) {
return TRUE;
}
parent = parent->parent;
}
return FALSE;
}
pe_resource_t *
uber_parent(pe_resource_t * rsc)
{
pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while ((parent->parent != NULL)
&& (parent->parent->variant != pcmk_rsc_variant_bundle)) {
parent = parent->parent;
}
return parent;
}
/*!
* \internal
* \brief Get the topmost parent of a resource as a const pointer
*
* \param[in] rsc Resource to check
* \param[in] include_bundle If true, go all the way to bundle
*
* \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
const pe_resource_t *
pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
{
const pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL) {
if (!include_bundle
&& (parent->parent->variant == pcmk_rsc_variant_bundle)) {
break;
}
parent = parent->parent;
}
return parent;
}
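/* Usage sketch (illustrative): checking whether a resource is, or is part of,
 * a clone without crossing a bundle boundary, as done in pe__unpack_resource():
 *
 *     if (pe_rsc_is_clone(pe__const_top_resource(rsc, false))) {
 *         // rsc is (or is inside) a cloned resource
 *     }
 */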
void
common_free(pe_resource_t * rsc)
{
if (rsc == NULL) {
return;
}
pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
g_list_free(rsc->rsc_cons);
g_list_free(rsc->rsc_cons_lhs);
g_list_free(rsc->rsc_tickets);
g_list_free(rsc->dangling_migrations);
if (rsc->parameter_cache != NULL) {
g_hash_table_destroy(rsc->parameter_cache);
}
if (rsc->meta != NULL) {
g_hash_table_destroy(rsc->meta);
}
if (rsc->utilization != NULL) {
g_hash_table_destroy(rsc->utilization);
}
if ((rsc->parent == NULL)
&& pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
rsc->orig_xml = NULL;
/* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
} else if (rsc->orig_xml) {
free_xml(rsc->xml);
rsc->xml = NULL;
}
if (rsc->running_on) {
g_list_free(rsc->running_on);
rsc->running_on = NULL;
}
if (rsc->known_on) {
g_hash_table_destroy(rsc->known_on);
rsc->known_on = NULL;
}
if (rsc->actions) {
g_list_free(rsc->actions);
rsc->actions = NULL;
}
if (rsc->allowed_nodes) {
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = NULL;
}
g_list_free(rsc->fillers);
g_list_free(rsc->rsc_location);
pe_rsc_trace(rsc, "Resource freed");
free(rsc->id);
free(rsc->clone_name);
free(rsc->allocated_to);
free(rsc->variant_opaque);
free(rsc->pending_task);
free(rsc);
}
/*!
* \internal
* \brief Count a node and update most preferred to it as appropriate
*
* \param[in] rsc An active resource
* \param[in] node A node that \p rsc is active on
* \param[in,out] active This will be set to \p node if \p node is more
* preferred than the current value
* \param[in,out] count_all If not NULL, this will be incremented
* \param[in,out] count_clean If not NULL, this will be incremented if \p node
* is online and clean
*
* \return true if the count should continue, or false if sufficiently known
*/
bool
pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
pe_node_t **active, unsigned int *count_all,
unsigned int *count_clean)
{
bool keep_looking = false;
bool is_happy = false;
CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
return false);
is_happy = node->details->online && !node->details->unclean;
if (count_all != NULL) {
++*count_all;
}
if ((count_clean != NULL) && is_happy) {
++*count_clean;
}
if ((count_all != NULL) || (count_clean != NULL)) {
keep_looking = true; // We're counting, so go through entire list
}
if (rsc->partial_migration_source != NULL) {
if (node->details == rsc->partial_migration_source->details) {
*active = node; // This is the migration source
} else {
keep_looking = true;
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
if (is_happy && ((*active == NULL) || !(*active)->details->online
|| (*active)->details->unclean)) {
*active = node; // This is the first clean node
} else {
keep_looking = true;
}
}
if (*active == NULL) {
*active = node; // This is the first node checked
}
return keep_looking;
}
// Shared implementation of resource_object_functions_t:active_node()
static pe_node_t *
active_node(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
pe_node_t *active = NULL;
if (count_all != NULL) {
*count_all = 0;
}
if (count_clean != NULL) {
*count_clean = 0;
}
if (rsc == NULL) {
return NULL;
}
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
count_all, count_clean)) {
break; // Don't waste time iterating if we don't have to
}
}
return active;
}
/*!
* \internal
* \brief Find and count active nodes according to "requires"
*
* \param[in] rsc Resource to check
* \param[out] count If not NULL, will be set to count of active nodes
*
* \return An active node (or NULL if resource is not active anywhere)
*
* \note This is a convenience wrapper for active_node() where the count of all
* active nodes or only clean active nodes is desired according to the
* "requires" meta-attribute.
*/
pe_node_t *
pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
{
if (rsc == NULL) {
if (count != NULL) {
*count = 0;
}
return NULL;
} else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
return rsc->fns->active_node(rsc, count, NULL);
} else {
return rsc->fns->active_node(rsc, NULL, count);
}
}
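/* Caller-side sketch (illustrative only): find a node where a resource is
 * active and count such nodes, using the counting rule appropriate for its
 * "requires" setting.
 *
 *     unsigned int count = 0;
 *     pe_node_t *node = pe__find_active_requires(rsc, &count);
 *
 *     if (node != NULL) {
 *         // rsc is active on "count" nodes (all, or clean only, per "requires")
 *     }
 */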
void
pe__count_common(pe_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
((pe_resource_t *) item->data)->fns->count(item->data);
}
} else if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
|| (rsc->role > pcmk_role_stopped)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
rsc->cluster->blocked_resources++;
}
}
}
/*!
* \internal
* \brief Update a resource's next role
*
* \param[in,out] rsc Resource to be updated
* \param[in] role Resource's new next role
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
rsc->id, role2text(rsc->next_role), role2text(role), why);
rsc->next_role = role;
}
}
