
diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h
index cb10417e16..f55141ea14 100644
--- a/include/crm/pengine/common.h
+++ b/include/crm/pengine/common.h
@@ -1,194 +1,195 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_COMMON__H
# define PE_COMMON__H
#ifdef __cplusplus
extern "C" {
#endif
# include <glib.h>
# include <regex.h>
# include <crm/common/iso8601.h>
extern gboolean was_processing_error;
extern gboolean was_processing_warning;
/* The order is (partially) significant here; the values from action_fail_ignore
* through action_fail_fence are in order of increasing severity.
*
* @COMPAT The values should be ordered and numbered per the "TODO" comments
* below, so all values are in order of severity and there is room for
* future additions, but that would break API compatibility.
* @TODO For now, we just use a function to compare the values specially, but
* at the next compatibility break, we should arrange things properly.
*/
enum action_fail_response {
action_fail_ignore, // @TODO = 10
// @TODO action_fail_demote = 20,
action_fail_recover, // @TODO = 30
// @TODO action_fail_reset_remote = 40,
// @TODO action_fail_restart_container = 50,
action_fail_migrate, // @TODO = 60
action_fail_block, // @TODO = 70
action_fail_stop, // @TODO = 80
action_fail_standby, // @TODO = 90
action_fail_fence, // @TODO = 100
// @COMPAT Values below here are out of order for API compatibility
action_fail_restart_container,
/* This is reserved for internal use for remote node connection resources.
* Fence the remote node if stonith is enabled, otherwise attempt to recover
* the connection resource. This allows us to specify types of connection
* resource failures that should result in fencing the remote node
* (for example, recurring monitor failures).
*/
action_fail_reset_remote,
action_fail_demote,
};
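/* Illustrative sketch, not part of this patch: given the @TODO numbering
 * above, a comparison helper can rank values by intended severity even
 * while the enum itself stays out of order for API compatibility. The
 * names fail_severity() and fail_response_ge() are hypothetical.
 */
static inline int
fail_severity(enum action_fail_response r)
{
    switch (r) {
        case action_fail_ignore:            return 10;
        case action_fail_demote:            return 20;
        case action_fail_recover:           return 30;
        case action_fail_reset_remote:      return 40;
        case action_fail_restart_container: return 50;
        case action_fail_migrate:           return 60;
        case action_fail_block:             return 70;
        case action_fail_stop:              return 80;
        case action_fail_standby:           return 90;
        case action_fail_fence:             return 100;
    }
    return 0;
}

static inline gboolean
fail_response_ge(enum action_fail_response a, enum action_fail_response b)
{
    // TRUE if a is at least as severe as b
    return fail_severity(a) >= fail_severity(b);
}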
/* the "done" action must be the "pre" action +1 */
enum action_tasks {
no_action,
monitor_rsc,
stop_rsc,
stopped_rsc,
start_rsc,
started_rsc,
action_notify,
action_notified,
action_promote,
action_promoted,
action_demote,
action_demoted,
shutdown_crm,
stonith_node
};
enum rsc_recovery_type {
recovery_stop_start,
recovery_stop_only,
recovery_block
};
enum rsc_start_requirement {
rsc_req_nothing, /* Allowed by custom_action() */
rsc_req_quorum, /* Enforced by custom_action() */
rsc_req_stonith /* Enforced by native_start_constraints() */
};
//! Possible roles that a resource can be in
enum rsc_role_e {
RSC_ROLE_UNKNOWN = 0,
RSC_ROLE_STOPPED = 1,
RSC_ROLE_STARTED = 2,
RSC_ROLE_UNPROMOTED = 3,
RSC_ROLE_PROMOTED = 4,
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
//! \deprecated Use RSC_ROLE_UNPROMOTED instead
RSC_ROLE_SLAVE = RSC_ROLE_UNPROMOTED,
//! \deprecated Use RSC_ROLE_PROMOTED instead
RSC_ROLE_MASTER = RSC_ROLE_PROMOTED,
#endif
};
# define RSC_ROLE_MAX (RSC_ROLE_PROMOTED + 1)
# define RSC_ROLE_UNKNOWN_S "Unknown"
# define RSC_ROLE_STOPPED_S "Stopped"
# define RSC_ROLE_STARTED_S "Started"
# define RSC_ROLE_SLAVE_S "Slave"
# define RSC_ROLE_MASTER_S "Master"
+# define RSC_ROLE_PROMOTED_LEGACY_S "Master"
enum pe_print_options {
pe_print_log = (1 << 0), //! \deprecated
pe_print_html = (1 << 1), //! \deprecated
pe_print_ncurses = (1 << 2), //! \deprecated
pe_print_printf = (1 << 3), //! \deprecated
pe_print_dev = (1 << 4), //! \deprecated Ignored
pe_print_details = (1 << 5), //! \deprecated Ignored
pe_print_max_details = (1 << 6), //! \deprecated Ignored
pe_print_rsconly = (1 << 7),
pe_print_ops = (1 << 8),
pe_print_suppres_nl = (1 << 9),
pe_print_xml = (1 << 10), //! \deprecated
pe_print_brief = (1 << 11),
pe_print_pending = (1 << 12),
pe_print_clone_details = (1 << 13),
pe_print_clone_active = (1 << 14), // Print clone instances only if active
pe_print_implicit = (1 << 15) // Print implicitly created resources
};
const char *task2text(enum action_tasks task);
enum action_tasks text2task(const char *task);
enum rsc_role_e text2role(const char *role);
const char *role2text(enum rsc_role_e role);
const char *fail2text(enum action_fail_response fail);
const char *pe_pref(GHashTable * options, const char *name);
void calculate_active_ops(GList * sorted_op_list, int *start_index, int *stop_index);
static inline const char *
recovery2text(enum rsc_recovery_type type)
{
switch (type) {
case recovery_stop_only:
return "shutting it down";
case recovery_stop_start:
return "attempting recovery";
case recovery_block:
return "waiting for an administrator";
}
return "Unknown";
}
typedef struct pe_re_match_data {
char *string;
int nregs;
regmatch_t *pmatch;
} pe_re_match_data_t;
typedef struct pe_match_data {
pe_re_match_data_t *re;
GHashTable *params;
GHashTable *meta;
} pe_match_data_t;
typedef struct pe_rsc_eval_data {
const char *standard;
const char *provider;
const char *agent;
} pe_rsc_eval_data_t;
typedef struct pe_op_eval_data {
const char *op_name;
guint interval;
} pe_op_eval_data_t;
typedef struct pe_rule_eval_data {
GHashTable *node_hash;
enum rsc_role_e role;
crm_time_t *now;
pe_match_data_t *match_data;
pe_rsc_eval_data_t *rsc_data;
pe_op_eval_data_t *op_data;
} pe_rule_eval_data_t;
#ifdef __cplusplus
}
#endif
#endif
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
index a1d616b443..32f731a2bd 100644
--- a/lib/pengine/common.c
+++ b/lib/pengine/common.c
@@ -1,631 +1,631 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <glib.h>
#include <crm/pengine/internal.h>
gboolean was_processing_error = FALSE;
gboolean was_processing_warning = FALSE;
static bool
check_health(const char *value)
{
return pcmk__strcase_any_of(value, "none", "custom", "only-green", "progressive",
"migrate-on-red", NULL);
}
static bool
check_stonith_action(const char *value)
{
return pcmk__strcase_any_of(value, "reboot", "poweroff", "off", NULL);
}
static bool
check_placement_strategy(const char *value)
{
return pcmk__strcase_any_of(value, "default", "utilization", "minimal",
"balanced", NULL);
}
static pcmk__cluster_option_t pe_opts[] = {
/* name, old name, type, allowed values,
* default value, validator,
* short description,
* long description
*/
{
"no-quorum-policy", NULL, "enum", "stop, freeze, ignore, demote, suicide",
"stop", pcmk__valid_quorum,
"What to do when the cluster does not have quorum",
NULL
},
{
"symmetric-cluster", NULL, "boolean", NULL,
"true", pcmk__valid_boolean,
"Whether resources can run on any node by default",
NULL
},
{
"maintenance-mode", NULL, "boolean", NULL,
"false", pcmk__valid_boolean,
"Whether the cluster should refrain from monitoring, starting, "
"and stopping resources",
NULL
},
{
"start-failure-is-fatal", NULL, "boolean", NULL,
"true", pcmk__valid_boolean,
"Whether a start failure should prevent a resource from being "
"recovered on the same node",
"When true, the cluster will immediately ban a resource from a node "
"if it fails to start there. When false, the cluster will instead "
"check the resource's fail count against its migration-threshold."
},
{
"enable-startup-probes", NULL, "boolean", NULL,
"true", pcmk__valid_boolean,
"Whether the cluster should check for active resources during start-up",
NULL
},
{
XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL,
"false", pcmk__valid_boolean,
"Whether to lock resources to a cleanly shut down node",
"When true, resources active on a node when it is cleanly shut down "
"are kept \"locked\" to that node (not allowed to run elsewhere) "
"until they start again on that node after it rejoins (or for at "
"most shutdown-lock-limit, if set). Stonith resources and "
"Pacemaker Remote connections are never locked. Clone and bundle "
"instances and the master role of promotable clones are currently "
"never locked, though support could be added in a future release."
},
{
XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL,
"0", pcmk__valid_interval_spec,
"Do not lock resources to a cleanly shut down node longer than this",
"If shutdown-lock is true and this is set to a nonzero time duration, "
"shutdown locks will expire after this much time has passed since "
"the shutdown was initiated, even if the node has not rejoined."
},
// Fencing-related options
{
"stonith-enabled", NULL, "boolean", NULL,
"true", pcmk__valid_boolean,
"*** Advanced Use Only *** "
"Whether nodes may be fenced as part of recovery",
"If false, unresponsive nodes are immediately assumed to be harmless, "
"and resources that were active on them may be recovered "
"elsewhere. This can result in a \"split-brain\" situation, "
"potentially leading to data loss and/or service unavailability."
},
{
"stonith-action", NULL, "enum", "reboot, off, poweroff",
"reboot", check_stonith_action,
"Action to send to fence device when a node needs to be fenced "
"(\"poweroff\" is a deprecated alias for \"off\")",
NULL
},
{
"stonith-timeout", NULL, "time", NULL,
"60s", pcmk__valid_interval_spec,
"*** Advanced Use Only *** Unused by Pacemaker",
"This value is not used by Pacemaker, but is kept for backward "
"compatibility, and certain legacy fence agents might use it."
},
{
XML_ATTR_HAVE_WATCHDOG, NULL, "boolean", NULL,
"false", pcmk__valid_boolean,
"Whether watchdog integration is enabled",
"This is set automatically by the cluster according to whether SBD "
"is detected to be in use. User-configured values are ignored. "
"The value `true` is meaningful if diskless SBD is used and "
"`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
"is required, watchdog-based self-fencing will be performed via "
"SBD without requiring a fencing resource explicitly configured."
},
{
"concurrent-fencing", NULL, "boolean", NULL,
PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
"Allow performing fencing operations in parallel",
NULL
},
{
"startup-fencing", NULL, "boolean", NULL,
"true", pcmk__valid_boolean,
"*** Advanced Use Only *** Whether to fence unseen nodes at start-up",
"Setting this to false may lead to a \"split-brain\" situation,"
"potentially leading to data loss and/or service unavailability."
},
{
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY, NULL, "time", NULL,
"0", pcmk__valid_interval_spec,
"Apply fencing delay targeting the lost nodes with the highest total resource priority",
"Apply specified delay for the fencings that are targeting the lost "
"nodes with the highest total resource priority in case we don't "
"have the majority of the nodes in our cluster partition, so that "
"the more significant nodes potentially win any fencing match, "
"which is especially meaningful under split-brain of 2-node "
"cluster. A promoted resource instance takes the base priority + 1 "
"on calculation if the base priority is not 0. Any static/random "
"delays that are introduced by `pcmk_delay_base/max` configured "
"for the corresponding fencing resources will be added to this "
"delay. This delay should be significantly greater than, safely "
"twice, the maximum `pcmk_delay_base/max`. By default, priority "
"fencing delay is disabled."
},
{
"cluster-delay", NULL, "time", NULL,
"60s", pcmk__valid_interval_spec,
"Maximum time for node-to-node communication",
"The node elected Designated Controller (DC) will consider an action "
"failed if it does not get a response from the node executing the "
"action within this time (after considering the action's own "
"timeout). The \"correct\" value will depend on the speed and "
"load of your network and cluster nodes."
},
{
"batch-limit", NULL, "integer", NULL,
"0", pcmk__valid_number,
"Maximum number of jobs that the cluster may execute in parallel "
"across all nodes",
"The \"correct\" value will depend on the speed and load of your "
"network and cluster nodes. If set to 0, the cluster will "
"impose a dynamically calculated limit when any node has a "
"high load."
},
{
"migration-limit", NULL, "integer", NULL,
"-1", pcmk__valid_number,
"The number of live migration actions that the cluster is allowed "
"to execute in parallel on a node (-1 means no limit)"
},
/* Orphans and stopping */
{
"stop-all-resources", NULL, "boolean", NULL,
"false", pcmk__valid_boolean,
"Whether the cluster should stop all active resources",
NULL
},
{
"stop-orphan-resources", NULL, "boolean", NULL,
"true", pcmk__valid_boolean,
"Whether to stop resources that were removed from the configuration",
NULL
},
{
"stop-orphan-actions", NULL, "boolean", NULL,
"true", pcmk__valid_boolean,
"Whether to cancel recurring actions removed from the configuration",
NULL
},
{
"remove-after-stop", NULL, "boolean", NULL,
"false", pcmk__valid_boolean,
"*** Deprecated *** Whether to remove stopped resources from "
"the executor",
"Values other than default are poorly tested and potentially dangerous."
" This option will be removed in a future release."
},
/* Storing inputs */
{
"pe-error-series-max", NULL, "integer", NULL,
"-1", pcmk__valid_number,
"The number of scheduler inputs resulting in errors to save",
"Zero to disable, -1 to store unlimited."
},
{
"pe-warn-series-max", NULL, "integer", NULL,
"5000", pcmk__valid_number,
"The number of scheduler inputs resulting in warnings to save",
"Zero to disable, -1 to store unlimited."
},
{
"pe-input-series-max", NULL, "integer", NULL,
"4000", pcmk__valid_number,
"The number of scheduler inputs without errors or warnings to save",
"Zero to disable, -1 to store unlimited."
},
/* Node health */
{
"node-health-strategy", NULL, "enum",
"none, migrate-on-red, only-green, progressive, custom",
"none", check_health,
"How cluster should react to node health attributes",
"Requires external entities to create node attributes (named with "
"the prefix \"#health\") with values \"red\", \"yellow\" or "
"\"green\"."
},
{
"node-health-base", NULL, "integer", NULL,
"0", pcmk__valid_number,
"Base health score assigned to a node",
"Only used when node-health-strategy is set to progressive."
},
{
"node-health-green", NULL, "integer", NULL,
"0", pcmk__valid_number,
"The score to use for a node health attribute whose value is \"green\"",
"Only used when node-health-strategy is set to custom or progressive."
},
{
"node-health-yellow", NULL, "integer", NULL,
"0", pcmk__valid_number,
"The score to use for a node health attribute whose value is \"yellow\"",
"Only used when node-health-strategy is set to custom or progressive."
},
{
"node-health-red", NULL, "integer", NULL,
"-INFINITY", pcmk__valid_number,
"The score to use for a node health attribute whose value is \"red\"",
"Only used when node-health-strategy is set to custom or progressive."
},
/* Placement strategy */
{
"placement-strategy", NULL, "enum",
"default, utilization, minimal, balanced",
"default", check_placement_strategy,
"How the cluster should allocate resources to nodes",
NULL
},
};
void
pe_metadata(void)
{
pcmk__print_option_metadata("pacemaker-schedulerd", "1.0",
"Pacemaker scheduler options",
"Cluster options used by Pacemaker's scheduler"
" (formerly called pengine)",
pe_opts, PCMK__NELEM(pe_opts));
}
void
verify_pe_options(GHashTable * options)
{
pcmk__validate_cluster_options(options, pe_opts, PCMK__NELEM(pe_opts));
}
const char *
pe_pref(GHashTable * options, const char *name)
{
return pcmk__cluster_option(options, pe_opts, PCMK__NELEM(pe_opts), name);
}
const char *
fail2text(enum action_fail_response fail)
{
const char *result = "<unknown>";
switch (fail) {
case action_fail_ignore:
result = "ignore";
break;
case action_fail_demote:
result = "demote";
break;
case action_fail_block:
result = "block";
break;
case action_fail_recover:
result = "recover";
break;
case action_fail_migrate:
result = "migrate";
break;
case action_fail_stop:
result = "stop";
break;
case action_fail_fence:
result = "fence";
break;
case action_fail_standby:
result = "standby";
break;
case action_fail_restart_container:
result = "restart-container";
break;
case action_fail_reset_remote:
result = "reset-remote";
break;
}
return result;
}
enum action_tasks
text2task(const char *task)
{
if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
return stop_rsc;
} else if (pcmk__str_eq(task, CRMD_ACTION_STOPPED, pcmk__str_casei)) {
return stopped_rsc;
} else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
return start_rsc;
} else if (pcmk__str_eq(task, CRMD_ACTION_STARTED, pcmk__str_casei)) {
return started_rsc;
} else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
return shutdown_crm;
} else if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
return stonith_node;
} else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
return monitor_rsc;
} else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)) {
return action_notify;
} else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFIED, pcmk__str_casei)) {
return action_notified;
} else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
return action_promote;
} else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
return action_demote;
} else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTED, pcmk__str_casei)) {
return action_promoted;
} else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTED, pcmk__str_casei)) {
return action_demoted;
}
#if SUPPORT_TRACING
if (pcmk__str_eq(task, CRMD_ACTION_CANCEL, pcmk__str_casei)) {
return no_action;
} else if (pcmk__str_eq(task, CRMD_ACTION_DELETE, pcmk__str_casei)) {
return no_action;
} else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
return no_action;
} else if (pcmk__str_eq(task, CRM_OP_PROBED, pcmk__str_casei)) {
return no_action;
} else if (pcmk__str_eq(task, CRM_OP_LRM_REFRESH, pcmk__str_casei)) {
return no_action;
} else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
return no_action;
} else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
return no_action;
}
crm_trace("Unsupported action: %s", task);
#endif
return no_action;
}
const char *
task2text(enum action_tasks task)
{
const char *result = "<unknown>";
switch (task) {
case no_action:
result = "no_action";
break;
case stop_rsc:
result = CRMD_ACTION_STOP;
break;
case stopped_rsc:
result = CRMD_ACTION_STOPPED;
break;
case start_rsc:
result = CRMD_ACTION_START;
break;
case started_rsc:
result = CRMD_ACTION_STARTED;
break;
case shutdown_crm:
result = CRM_OP_SHUTDOWN;
break;
case stonith_node:
result = CRM_OP_FENCE;
break;
case monitor_rsc:
result = CRMD_ACTION_STATUS;
break;
case action_notify:
result = CRMD_ACTION_NOTIFY;
break;
case action_notified:
result = CRMD_ACTION_NOTIFIED;
break;
case action_promote:
result = CRMD_ACTION_PROMOTE;
break;
case action_promoted:
result = CRMD_ACTION_PROMOTED;
break;
case action_demote:
result = CRMD_ACTION_DEMOTE;
break;
case action_demoted:
result = CRMD_ACTION_DEMOTED;
break;
}
return result;
}
const char *
role2text(enum rsc_role_e role)
{
switch (role) {
case RSC_ROLE_UNKNOWN:
return RSC_ROLE_UNKNOWN_S;
case RSC_ROLE_STOPPED:
return RSC_ROLE_STOPPED_S;
case RSC_ROLE_STARTED:
return RSC_ROLE_STARTED_S;
case RSC_ROLE_UNPROMOTED:
return RSC_ROLE_SLAVE_S;
case RSC_ROLE_PROMOTED:
- return RSC_ROLE_MASTER_S;
+ return RSC_ROLE_PROMOTED_LEGACY_S;
}
CRM_CHECK(role >= RSC_ROLE_UNKNOWN, return RSC_ROLE_UNKNOWN_S);
CRM_CHECK(role < RSC_ROLE_MAX, return RSC_ROLE_UNKNOWN_S);
// coverity[dead_error_line]
return RSC_ROLE_UNKNOWN_S;
}
enum rsc_role_e
text2role(const char *role)
{
CRM_ASSERT(role != NULL);
if (pcmk__str_eq(role, RSC_ROLE_STOPPED_S, pcmk__str_casei)) {
return RSC_ROLE_STOPPED;
} else if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_casei)) {
return RSC_ROLE_STARTED;
} else if (pcmk__str_eq(role, RSC_ROLE_SLAVE_S, pcmk__str_casei)) {
return RSC_ROLE_UNPROMOTED;
- } else if (pcmk__str_eq(role, RSC_ROLE_MASTER_S, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(role, RSC_ROLE_PROMOTED_LEGACY_S, pcmk__str_casei)) {
return RSC_ROLE_PROMOTED;
} else if (pcmk__str_eq(role, RSC_ROLE_UNKNOWN_S, pcmk__str_casei)) {
return RSC_ROLE_UNKNOWN;
}
crm_err("Unknown role: %s", role);
return RSC_ROLE_UNKNOWN;
}
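/* Illustrative round trip, not part of this patch: after the renaming
 * above, the promoted role still renders and parses as the legacy
 * "Master" string, now spelled RSC_ROLE_PROMOTED_LEGACY_S:
 *
 *     const char *s = role2text(RSC_ROLE_PROMOTED);  // "Master"
 *     enum rsc_role_e r = text2role(s);              // RSC_ROLE_PROMOTED
 */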
/*!
* \internal
* \brief Add two scores (bounding to +/- INFINITY)
*
* \param[in] score1 First score to add
* \param[in] score2 Second score to add
*/
int
pe__add_scores(int score1, int score2)
{
int result = score1 + score2;
// First handle the cases where one or both is infinite
if (score1 <= -CRM_SCORE_INFINITY) {
if (score2 <= -CRM_SCORE_INFINITY) {
crm_trace("-INFINITY + -INFINITY = -INFINITY");
} else if (score2 >= CRM_SCORE_INFINITY) {
crm_trace("-INFINITY + +INFINITY = -INFINITY");
} else {
crm_trace("-INFINITY + %d = -INFINITY", score2);
}
return -CRM_SCORE_INFINITY;
} else if (score2 <= -CRM_SCORE_INFINITY) {
if (score1 >= CRM_SCORE_INFINITY) {
crm_trace("+INFINITY + -INFINITY = -INFINITY");
} else {
crm_trace("%d + -INFINITY = -INFINITY", score1);
}
return -CRM_SCORE_INFINITY;
} else if (score1 >= CRM_SCORE_INFINITY) {
if (score2 >= CRM_SCORE_INFINITY) {
crm_trace("+INFINITY + +INFINITY = +INFINITY");
} else {
crm_trace("+INFINITY + %d = +INFINITY", score2);
}
return CRM_SCORE_INFINITY;
} else if (score2 >= CRM_SCORE_INFINITY) {
crm_trace("%d + +INFINITY = +INFINITY", score1);
return CRM_SCORE_INFINITY;
}
/* As long as CRM_SCORE_INFINITY is less than half of the maximum integer,
* we can ignore the possibility of integer overflow
*/
// Bound result to infinity
if (result >= CRM_SCORE_INFINITY) {
crm_trace("%d + %d = +INFINITY", score1, score2);
return CRM_SCORE_INFINITY;
} else if (result <= -CRM_SCORE_INFINITY) {
crm_trace("%d + %d = -INFINITY", score1, score2);
return -CRM_SCORE_INFINITY;
}
crm_trace("%d + %d = %d", score1, score2, result);
return result;
}
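/* Illustrative examples, not part of this patch (assuming
 * CRM_SCORE_INFINITY is 1000000, as in current Pacemaker):
 *
 *     pe__add_scores(1, 2);                   // 3
 *     pe__add_scores(999999, 999999);         // bounded to +CRM_SCORE_INFINITY
 *     pe__add_scores(-CRM_SCORE_INFINITY, 5); // -CRM_SCORE_INFINITY
 *     pe__add_scores(CRM_SCORE_INFINITY,
 *                    -CRM_SCORE_INFINITY);    // -CRM_SCORE_INFINITY (minus wins)
 */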
void
add_hash_param(GHashTable * hash, const char *name, const char *value)
{
CRM_CHECK(hash != NULL, return);
crm_trace("adding: name=%s value=%s", crm_str(name), crm_str(value));
if (name == NULL || value == NULL) {
return;
} else if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
return;
} else if (g_hash_table_lookup(hash, name) == NULL) {
g_hash_table_insert(hash, strdup(name), strdup(value));
}
}
const char *
pe_node_attribute_calculated(const pe_node_t *node, const char *name,
const pe_resource_t *rsc)
{
const char *source;
if(node == NULL) {
return NULL;
} else if(rsc == NULL) {
return g_hash_table_lookup(node->details->attrs, name);
}
source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
if(source == NULL || !pcmk__str_eq("host", source, pcmk__str_casei)) {
return g_hash_table_lookup(node->details->attrs, name);
}
/* Use attributes set for the container's location
* instead of for the container itself
*
* Useful when the container is using the host's local
* storage
*/
CRM_ASSERT(node->details->remote_rsc);
CRM_ASSERT(node->details->remote_rsc->container);
if(node->details->remote_rsc->container->running_on) {
pe_node_t *host = node->details->remote_rsc->container->running_on->data;
pe_rsc_trace(rsc, "%s: Looking for %s on the container host %s", rsc->id, name, host->details->uname);
return g_hash_table_lookup(host->details->attrs, name);
}
pe_rsc_trace(rsc, "%s: Not looking for %s on the container host: %s is inactive",
rsc->id, name, node->details->remote_rsc->container->id);
return NULL;
}
const char *
pe_node_attribute_raw(pe_node_t *node, const char *name)
{
if(node == NULL) {
return NULL;
}
return g_hash_table_lookup(node->details->attrs, name);
}
diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c
index 18f21b974c..bf4f458162 100644
--- a/tools/crm_resource_ban.c
+++ b/tools/crm_resource_ban.c
@@ -1,460 +1,465 @@
/*
* Copyright 2004-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_resource.h>
#define XPATH_MAX 1024
static char *
parse_cli_lifetime(pcmk__output_t *out, const char *move_lifetime)
{
char *later_s = NULL;
crm_time_t *now = NULL;
crm_time_t *later = NULL;
crm_time_t *duration = NULL;
if (move_lifetime == NULL) {
return NULL;
}
duration = crm_time_parse_duration(move_lifetime);
if (duration == NULL) {
out->err(out, "Invalid duration specified: %s\n"
"Please refer to https://en.wikipedia.org/wiki/ISO_8601#Durations "
"for examples of valid durations", move_lifetime);
return NULL;
}
now = crm_time_new(NULL);
later = crm_time_add(now, duration);
if (later == NULL) {
out->err(out, "Unable to add %s to current time\n"
"Please report to " PACKAGE_BUGREPORT " as possible bug",
move_lifetime);
crm_time_free(now);
crm_time_free(duration);
return NULL;
}
crm_time_log(LOG_INFO, "now ", now,
crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
crm_time_log(LOG_INFO, "later ", later,
crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
crm_time_log(LOG_INFO, "duration", duration, crm_time_log_date | crm_time_log_timeofday);
later_s = crm_time_as_string(later, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
out->info(out, "Migration will take effect until: %s", later_s);
crm_time_free(duration);
crm_time_free(later);
crm_time_free(now);
return later_s;
}
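/* Illustrative usage, not part of this patch: an ISO 8601 duration such
 * as "PT1H" yields the current time plus one hour, formatted like the
 * date_expression end= values shown later in this file (for example
 * "2018-12-12 14:05:37 -05:00"); an unparseable duration returns NULL
 * after printing the error message above.
 */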
// \return Standard Pacemaker return code
int
cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host,
const char *move_lifetime, GList *allnodes, cib_t * cib_conn,
int cib_options, gboolean promoted_role_only)
{
char *later_s = NULL;
int rc = pcmk_rc_ok;
xmlNode *fragment = NULL;
xmlNode *location = NULL;
if(host == NULL) {
GList *n = allnodes;
for(; n && rc == pcmk_rc_ok; n = n->next) {
pe_node_t *target = n->data;
rc = cli_resource_ban(out, rsc_id, target->details->uname, move_lifetime,
NULL, cib_conn, cib_options, promoted_role_only);
}
return rc;
}
later_s = parse_cli_lifetime(out, move_lifetime);
if(move_lifetime && later_s == NULL) {
return EINVAL;
}
fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS);
location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host);
out->info(out, "WARNING: Creating rsc_location constraint '%s' with a "
"score of -INFINITY for resource %s on %s.\n\tThis will "
"prevent %s from %s on %s until the constraint is removed "
"using the clear option or by editing the CIB with an "
"appropriate tool\n\tThis will be the case even if %s "
"is the last node in the cluster",
ID(location), rsc_id, host, rsc_id,
(promoted_role_only? "being promoted" : "running"),
host, host);
crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id);
if(promoted_role_only) {
- crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S);
+ crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_PROMOTED_LEGACY_S);
} else {
crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S);
}
if (later_s == NULL) {
/* Short form */
crm_xml_add(location, XML_CIB_TAG_NODE, host);
crm_xml_add(location, XML_RULE_ATTR_SCORE, CRM_MINUS_INFINITY_S);
} else {
xmlNode *rule = create_xml_node(location, XML_TAG_RULE);
xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION);
crm_xml_set_id(rule, "cli-ban-%s-on-%s-rule", rsc_id, host);
crm_xml_add(rule, XML_RULE_ATTR_SCORE, CRM_MINUS_INFINITY_S);
crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and");
crm_xml_set_id(expr, "cli-ban-%s-on-%s-expr", rsc_id, host);
crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, CRM_ATTR_UNAME);
crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq");
crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host);
crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string");
expr = create_xml_node(rule, "date_expression");
crm_xml_set_id(expr, "cli-ban-%s-on-%s-lifetime", rsc_id, host);
crm_xml_add(expr, "operation", "lt");
crm_xml_add(expr, "end", later_s);
}
crm_log_xml_notice(fragment, "Modify");
rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options);
rc = pcmk_legacy2rc(rc);
free_xml(fragment);
free(later_s);
return rc;
}
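/* Illustrative result, not part of this patch: with rsc_id="dummy",
 * host="node1", no lifetime, and promoted_role_only=TRUE, the short form
 * above produces a constraint like
 *
 *     <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy"
 *                   role="Master" node="node1" score="-INFINITY"/>
 *
 * where role="Master" now comes from RSC_ROLE_PROMOTED_LEGACY_S.
 */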
// \return Standard Pacemaker return code
int
cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host,
const char *move_lifetime, cib_t * cib_conn, int cib_options,
gboolean promoted_role_only)
{
char *later_s = parse_cli_lifetime(out, move_lifetime);
int rc = pcmk_rc_ok;
xmlNode *location = NULL;
xmlNode *fragment = NULL;
if(move_lifetime && later_s == NULL) {
return EINVAL;
}
if(cib_conn == NULL) {
free(later_s);
return ENOTCONN;
}
fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS);
location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
crm_xml_set_id(location, "cli-prefer-%s", rsc_id);
crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id);
if(promoted_role_only) {
- crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_MASTER_S);
+ crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_PROMOTED_LEGACY_S);
} else {
crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S);
}
if (later_s == NULL) {
/* Short form */
crm_xml_add(location, XML_CIB_TAG_NODE, host);
crm_xml_add(location, XML_RULE_ATTR_SCORE, CRM_INFINITY_S);
} else {
xmlNode *rule = create_xml_node(location, XML_TAG_RULE);
xmlNode *expr = create_xml_node(rule, XML_TAG_EXPRESSION);
crm_xml_set_id(rule, "cli-prefer-rule-%s", rsc_id);
crm_xml_add(rule, XML_RULE_ATTR_SCORE, CRM_INFINITY_S);
crm_xml_add(rule, XML_RULE_ATTR_BOOLEAN_OP, "and");
crm_xml_set_id(expr, "cli-prefer-expr-%s", rsc_id);
crm_xml_add(expr, XML_EXPR_ATTR_ATTRIBUTE, CRM_ATTR_UNAME);
crm_xml_add(expr, XML_EXPR_ATTR_OPERATION, "eq");
crm_xml_add(expr, XML_EXPR_ATTR_VALUE, host);
crm_xml_add(expr, XML_EXPR_ATTR_TYPE, "string");
expr = create_xml_node(rule, "date_expression");
crm_xml_set_id(expr, "cli-prefer-lifetime-end-%s", rsc_id);
crm_xml_add(expr, "operation", "lt");
crm_xml_add(expr, "end", later_s);
}
crm_log_xml_info(fragment, "Modify");
rc = cib_conn->cmds->update(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options);
rc = pcmk_legacy2rc(rc);
free_xml(fragment);
free(later_s);
return rc;
}
/* Nodes can be specified two different ways in the CIB, so we have two different
* functions to try clearing out any constraints on them:
*
* (1) The node could be given by attribute=/value= in an expression XML node.
* That's what resource_clear_node_in_expr handles. That XML looks like this:
*
* <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started">
* <rule id="cli-prefer-rule-dummy" score="INFINITY" boolean-op="and">
* <expression id="cli-prefer-expr-dummy" attribute="#uname" operation="eq" value="test02" type="string"/>
* <date_expression id="cli-prefer-lifetime-end-dummy" operation="lt" end="2018-12-12 14:05:37 -05:00"/>
* </rule>
* </rsc_location>
*
* (2) The node could be given by node= in an rsc_location XML node. That's
* what resource_clear_node_in_location handles. That XML looks like this:
*
* <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
*
* \return Standard Pacemaker return code
*/
static int
resource_clear_node_in_expr(const char *rsc_id, const char *host, cib_t * cib_conn,
int cib_options)
{
int rc = pcmk_rc_ok;
char *xpath_string = NULL;
xpath_string = crm_strdup_printf("//rsc_location[@id='cli-prefer-%s'][rule[@id='cli-prefer-rule-%s']/expression[@attribute='#uname' and @value='%s']]",
rsc_id, rsc_id, host);
rc = cib_conn->cmds->remove(cib_conn, xpath_string, NULL, cib_xpath | cib_options);
if (rc == -ENXIO) {
rc = pcmk_rc_ok;
} else {
rc = pcmk_legacy2rc(rc);
}
free(xpath_string);
return rc;
}
// \return Standard Pacemaker return code
static int
resource_clear_node_in_location(const char *rsc_id, const char *host, cib_t * cib_conn,
int cib_options, bool clear_ban_constraints, gboolean force)
{
int rc = pcmk_rc_ok;
xmlNode *fragment = NULL;
xmlNode *location = NULL;
fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS);
if (clear_ban_constraints == TRUE) {
location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
crm_xml_set_id(location, "cli-ban-%s-on-%s", rsc_id, host);
}
location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
crm_xml_set_id(location, "cli-prefer-%s", rsc_id);
if (force == FALSE) {
crm_xml_add(location, XML_CIB_TAG_NODE, host);
}
crm_log_xml_info(fragment, "Delete");
rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_CONSTRAINTS, fragment, cib_options);
if (rc == -ENXIO) {
rc = pcmk_rc_ok;
} else {
rc = pcmk_legacy2rc(rc);
}
free_xml(fragment);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes, cib_t * cib_conn,
int cib_options, bool clear_ban_constraints, gboolean force)
{
int rc = pcmk_rc_ok;
if(cib_conn == NULL) {
return ENOTCONN;
}
if (host) {
rc = resource_clear_node_in_expr(rsc_id, host, cib_conn, cib_options);
/* rc does not tell us whether the previous operation did anything, only
* whether it failed or not. Thus, as long as it did not fail, we need
* to try the second clear method.
*/
if (rc == pcmk_rc_ok) {
rc = resource_clear_node_in_location(rsc_id, host, cib_conn,
cib_options, clear_ban_constraints,
force);
}
} else {
GList *n = allnodes;
/* Iterate over all nodes, attempting to clear the constraint from each.
* On the first error, abort.
*/
for(; n; n = n->next) {
pe_node_t *target = n->data;
rc = cli_resource_clear(rsc_id, target->details->uname, NULL,
cib_conn, cib_options, clear_ban_constraints,
force);
if (rc != pcmk_rc_ok) {
break;
}
}
}
return rc;
}
static char *
build_clear_xpath_string(xmlNode *constraint_node, const char *rsc, const char *node, gboolean promoted_role_only)
{
int offset = 0;
char *xpath_string = NULL;
char *first_half = NULL;
char *rsc_role_substr = NULL;
char *date_substr = NULL;
if (pcmk__starts_with(ID(constraint_node), "cli-ban-")) {
date_substr = crm_strdup_printf("//date_expression[@id='%s-lifetime']",
ID(constraint_node));
} else if (pcmk__starts_with(ID(constraint_node), "cli-prefer-")) {
date_substr = crm_strdup_printf("//date_expression[@id='cli-prefer-lifetime-end-%s']",
crm_element_value(constraint_node, "rsc"));
} else {
return NULL;
}
first_half = calloc(1, XPATH_MAX);
offset += snprintf(first_half + offset, XPATH_MAX - offset, "//rsc_location");
if (node != NULL || rsc != NULL || promoted_role_only == TRUE) {
offset += snprintf(first_half + offset, XPATH_MAX - offset, "[");
if (node != NULL) {
if (rsc != NULL || promoted_role_only == TRUE) {
offset += snprintf(first_half + offset, XPATH_MAX - offset, "@node='%s' and ", node);
} else {
offset += snprintf(first_half + offset, XPATH_MAX - offset, "@node='%s'", node);
}
}
if (rsc != NULL && promoted_role_only == TRUE) {
- rsc_role_substr = crm_strdup_printf("@rsc='%s' and @role='%s'", rsc, RSC_ROLE_MASTER_S);
- offset += snprintf(first_half + offset, XPATH_MAX - offset, "@rsc='%s' and @role='%s']", rsc, RSC_ROLE_MASTER_S);
+ rsc_role_substr = crm_strdup_printf("@rsc='%s' and @role='%s'",
+ rsc, RSC_ROLE_PROMOTED_LEGACY_S);
+ offset += snprintf(first_half + offset, XPATH_MAX - offset,
+ "@rsc='%s' and @role='%s']",
+ rsc, RSC_ROLE_PROMOTED_LEGACY_S);
} else if (rsc != NULL) {
rsc_role_substr = crm_strdup_printf("@rsc='%s'", rsc);
offset += snprintf(first_half + offset, XPATH_MAX - offset, "@rsc='%s']", rsc);
} else if (promoted_role_only == TRUE) {
- rsc_role_substr = crm_strdup_printf("@role='%s'", RSC_ROLE_MASTER_S);
- offset += snprintf(first_half + offset, XPATH_MAX - offset, "@role='%s']", RSC_ROLE_MASTER_S);
+ rsc_role_substr = crm_strdup_printf("@role='%s'",
+ RSC_ROLE_PROMOTED_LEGACY_S);
+ offset += snprintf(first_half + offset, XPATH_MAX - offset,
+ "@role='%s']", RSC_ROLE_PROMOTED_LEGACY_S);
} else {
offset += snprintf(first_half + offset, XPATH_MAX - offset, "]");
}
}
if (node != NULL) {
if (rsc_role_substr != NULL) {
xpath_string = crm_strdup_printf("%s|//rsc_location[%s]/rule[expression[@attribute='#uname' and @value='%s']]%s",
first_half, rsc_role_substr, node, date_substr);
} else {
xpath_string = crm_strdup_printf("%s|//rsc_location/rule[expression[@attribute='#uname' and @value='%s']]%s",
first_half, node, date_substr);
}
} else {
xpath_string = crm_strdup_printf("%s%s", first_half, date_substr);
}
free(first_half);
free(date_substr);
free(rsc_role_substr);
return xpath_string;
}
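/* Illustrative output, not part of this patch: for a "cli-prefer-dummy"
 * constraint with rsc="dummy", node="node1", and promoted_role_only=TRUE,
 * the format strings above produce (wrapped here; the real string is one
 * line):
 *
 *     //rsc_location[@node='node1' and @rsc='dummy' and @role='Master']
 *     |//rsc_location[@rsc='dummy' and @role='Master']
 *      /rule[expression[@attribute='#uname' and @value='node1']]
 *     //date_expression[@id='cli-prefer-lifetime-end-dummy']
 *
 * with "Master" again supplied by RSC_ROLE_PROMOTED_LEGACY_S.
 */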
// \return Standard Pacemaker return code
int
cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, int cib_options,
const char *rsc, const char *node, gboolean promoted_role_only)
{
xmlXPathObject *xpathObj = NULL;
xmlNode *cib_constraints = NULL;
crm_time_t *now = crm_time_new(NULL);
int i;
int rc = pcmk_rc_ok;
cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, root);
xpathObj = xpath_search(cib_constraints, "//" XML_CONS_TAG_RSC_LOCATION);
for (i = 0; i < numXpathResults(xpathObj); i++) {
xmlNode *constraint_node = getXpathResult(xpathObj, i);
xmlNode *date_expr_node = NULL;
crm_time_t *end = NULL;
char *xpath_string = NULL;
xpath_string = build_clear_xpath_string(constraint_node, rsc, node, promoted_role_only);
if (xpath_string == NULL) {
continue;
}
date_expr_node = get_xpath_object(xpath_string, constraint_node, LOG_DEBUG);
if (date_expr_node == NULL) {
free(xpath_string);
continue;
}
/* And then finally, see if the date expression is expired. If so,
* clear the constraint.
*/
end = crm_time_new(crm_element_value(date_expr_node, "end"));
if (crm_time_compare(now, end) == 1) {
xmlNode *fragment = NULL;
xmlNode *location = NULL;
fragment = create_xml_node(NULL, XML_CIB_TAG_CONSTRAINTS);
location = create_xml_node(fragment, XML_CONS_TAG_RSC_LOCATION);
crm_xml_set_id(location, "%s", ID(constraint_node));
crm_log_xml_info(fragment, "Delete");
rc = cib_conn->cmds->remove(cib_conn, XML_CIB_TAG_CONSTRAINTS,
fragment, cib_options);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
free(xpath_string);
goto done;
}
free_xml(fragment);
}
crm_time_free(end);
free(xpath_string);
}
done:
freeXpathObject(xpathObj);
crm_time_free(now);
return rc;
}
