diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c
index 223a70157c..dc8ace77b7 100644
--- a/daemons/fenced/fenced_commands.c
+++ b/daemons/fenced/fenced_commands.c
@@ -1,3709 +1,3709 @@
/*
* Copyright 2009-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <ctype.h>
#include <crm/crm.h>
#include <crm/common/ipc.h>
#include <crm/common/ipc_internal.h>
#include <crm/cluster/internal.h>
#include <crm/common/mainloop.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#include <crm/common/xml.h>
#include <pacemaker-fenced.h>
GHashTable *device_list = NULL;
GHashTable *topology = NULL;
static GList *cmd_list = NULL;
static GHashTable *fenced_handlers = NULL;
struct device_search_s {
/* target of fence action */
char *host;
/* requested fence action */
char *action;
/* timeout to use if a device is queried dynamically for possible targets */
int per_device_timeout;
/* number of registered fencing devices at time of request */
int replies_needed;
/* number of device replies received so far */
int replies_received;
/* whether the target is allowed to run the requested action (or remapped "off") on itself */
bool allow_suicide;
/* private data to pass to search callback function */
void *user_data;
/* function to call when all replies have been received */
void (*callback) (GList * devices, void *user_data);
/* devices capable of performing requested action (or off if remapping) */
GList *capable;
/* Whether to perform searches that support the action */
uint32_t support_action_only;
};
static gboolean stonith_device_dispatch(gpointer user_data);
static void st_child_done(int pid, const pcmk__action_result_t *result,
void *user_data);
static void search_devices_record_result(struct device_search_s *search, const char *device,
gboolean can_fence);
static int get_agent_metadata(const char *agent, xmlNode **metadata);
static void read_action_metadata(stonith_device_t *device);
static enum fenced_target_by unpack_level_kind(const xmlNode *level);
typedef struct async_command_s {
int id;
int pid;
int fd_stdout;
int options;
int default_timeout; /* seconds */
int timeout; /* seconds */
int start_delay; // seconds (-1 means disable static/random fencing delays)
int delay_id;
char *op;
char *origin;
char *client;
char *client_name;
char *remote_op_id;
char *target;
uint32_t target_nodeid;
char *action;
char *device;
GList *device_list;
GList *next_device_iter; // device_list entry for next device to execute
void *internal_user_data;
void (*done_cb) (int pid, const pcmk__action_result_t *result,
void *user_data);
guint timer_sigterm;
guint timer_sigkill;
/*! If the operation timed out, this is the last signal
* we sent to the process to get it to terminate */
int last_timeout_signo;
stonith_device_t *active_on;
stonith_device_t *activating_on;
} async_command_t;
static xmlNode *construct_async_reply(const async_command_t *cmd,
const pcmk__action_result_t *result);
static gboolean
is_action_required(const char *action, const stonith_device_t *device)
{
return (device != NULL) && device->automatic_unfencing
&& pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none);
}
static int
get_action_delay_max(const stonith_device_t *device, const char *action)
{
const char *value = NULL;
guint delay_max = 0U;
if (!pcmk__is_fencing_action(action)) {
return 0;
}
value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_MAX);
if (value) {
pcmk_parse_interval_spec(value, &delay_max);
delay_max /= 1000;
}
return (int) delay_max;
}
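/* Note: PCMK_STONITH_DELAY_MAX is parsed as an interval spec, so (for
 * example) a configured value of "30s" yields a maximum delay of 30 seconds
 * after the division above.
 */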
static int
get_action_delay_base(const stonith_device_t *device, const char *action,
const char *target)
{
char *hash_value = NULL;
guint delay_base = 0U;
if (!pcmk__is_fencing_action(action)) {
return 0;
}
hash_value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_BASE);
if (hash_value) {
char *value = pcmk__str_copy(hash_value);
char *valptr = value;
if (target != NULL) {
for (char *val = strtok(value, "; \t"); val != NULL; val = strtok(NULL, "; \t")) {
char *mapval = strchr(val, ':');
if (mapval == NULL || mapval[1] == 0) {
crm_err("pcmk_delay_base: empty value in mapping", val);
continue;
}
if (mapval != val && strncasecmp(target, val, (size_t)(mapval - val)) == 0) {
value = mapval + 1;
crm_debug("pcmk_delay_base mapped to %s for %s",
value, target);
break;
}
}
}
if (strchr(value, ':') == 0) {
pcmk_parse_interval_spec(value, &delay_base);
delay_base /= 1000;
}
free(valptr);
}
return (int) delay_base;
}
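/* Illustrative example of the parsing above (values are assumptions, not from
 * the original source): with pcmk_delay_base="node1:3s;node2:10s" and target
 * "node2", the loop matches the "node2:" prefix case-insensitively and the
 * base delay becomes 10 seconds; a plain value such as "5s" (no ':') applies
 * to every target.
 */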
/*!
* \internal
* \brief Override STONITH timeout with pcmk_*_timeout if available
*
* \param[in] device STONITH device to use
* \param[in] action STONITH action name
* \param[in] default_timeout Timeout to use if device does not have
* a pcmk_*_timeout parameter for action
*
* \return Value of pcmk_(action)_timeout if available, otherwise default_timeout
* \note For consistency, it would be nice if reboot/off/on timeouts could be
* set the same way as start/stop/monitor timeouts, i.e. with an
* <operation> entry in the fencing resource configuration. However that
* is insufficient because fencing devices may be registered directly via
* the fencer's register_device() API instead of going through the CIB
* (e.g. stonith_admin uses it for its -R option, and the executor uses it
* to ensure a device is registered when a command is issued). As device
* properties, pcmk_*_timeout parameters can be grabbed by the fencer when
* the device is registered, whether by CIB change or API call.
*/
static int
get_action_timeout(const stonith_device_t *device, const char *action,
int default_timeout)
{
if (action && device && device->params) {
char buffer[64] = { 0, };
const char *value = NULL;
/* If "reboot" was requested but the device does not support it,
* we will remap to "off", so check timeout for "off" instead
*/
if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)
&& !pcmk_is_set(device->flags, st_device_supports_reboot)) {
crm_trace("%s doesn't support reboot, using timeout for off instead",
device->id);
action = PCMK_ACTION_OFF;
}
/* If the device config specified an action-specific timeout, use it */
snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action);
value = g_hash_table_lookup(device->params, buffer);
if (value) {
long long timeout_ms = crm_get_msec(value);
return (int) QB_MIN(timeout_ms / 1000, INT_MAX);
}
}
return default_timeout;
}
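/* Example (assumed configuration, for illustration only): a device with
 * pcmk_reboot_timeout="120s" returns 120 here for a "reboot" request,
 * overriding whatever default_timeout the caller passed in.
 */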
/*!
* \internal
* \brief Get the currently executing device for a fencing operation
*
* \param[in] cmd Fencing operation to check
*
* \return Currently executing device for \p cmd if any, otherwise NULL
*/
static stonith_device_t *
cmd_device(const async_command_t *cmd)
{
if ((cmd == NULL) || (cmd->device == NULL) || (device_list == NULL)) {
return NULL;
}
return g_hash_table_lookup(device_list, cmd->device);
}
/*!
* \internal
* \brief Return the configured reboot action for a given device
*
* \param[in] device_id Device ID
*
* \return Configured reboot action for \p device_id
*/
const char *
fenced_device_reboot_action(const char *device_id)
{
const char *action = NULL;
if ((device_list != NULL) && (device_id != NULL)) {
stonith_device_t *device = g_hash_table_lookup(device_list, device_id);
if ((device != NULL) && (device->params != NULL)) {
action = g_hash_table_lookup(device->params, "pcmk_reboot_action");
}
}
return pcmk__s(action, PCMK_ACTION_REBOOT);
}
/*!
* \internal
* \brief Check whether a given device supports the "on" action
*
* \param[in] device_id Device ID
*
* \return true if \p device_id supports "on", otherwise false
*/
bool
fenced_device_supports_on(const char *device_id)
{
if ((device_list != NULL) && (device_id != NULL)) {
stonith_device_t *device = g_hash_table_lookup(device_list, device_id);
if (device != NULL) {
return pcmk_is_set(device->flags, st_device_supports_on);
}
}
return false;
}
static void
free_async_command(async_command_t * cmd)
{
if (!cmd) {
return;
}
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
cmd_list = g_list_remove(cmd_list, cmd);
g_list_free_full(cmd->device_list, free);
free(cmd->device);
free(cmd->action);
free(cmd->target);
free(cmd->remote_op_id);
free(cmd->client);
free(cmd->client_name);
free(cmd->origin);
free(cmd->op);
free(cmd);
}
/*!
* \internal
* \brief Create a new asynchronous fencing operation from request XML
*
* \param[in] msg Fencing request XML (from IPC or CPG)
*
* \return Newly allocated fencing operation on success, otherwise NULL
*
* \note This asserts on memory errors, so a NULL return indicates an
* unparseable message.
*/
static async_command_t *
create_async_command(xmlNode *msg)
{
xmlNode *op = NULL;
async_command_t *cmd = NULL;
if (msg == NULL) {
return NULL;
}
op = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, msg, LOG_ERR);
if (op == NULL) {
return NULL;
}
cmd = pcmk__assert_alloc(1, sizeof(async_command_t));
// All messages must include these
cmd->action = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ACTION);
cmd->op = crm_element_value_copy(msg, PCMK__XA_ST_OP);
cmd->client = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTID);
if ((cmd->action == NULL) || (cmd->op == NULL) || (cmd->client == NULL)) {
free_async_command(cmd);
return NULL;
}
crm_element_value_int(msg, PCMK__XA_ST_CALLID, &(cmd->id));
crm_element_value_int(msg, PCMK__XA_ST_CALLOPT, &(cmd->options));
crm_element_value_int(msg, PCMK__XA_ST_DELAY, &(cmd->start_delay));
crm_element_value_int(msg, PCMK__XA_ST_TIMEOUT, &(cmd->default_timeout));
cmd->timeout = cmd->default_timeout;
cmd->origin = crm_element_value_copy(msg, PCMK__XA_SRC);
cmd->remote_op_id = crm_element_value_copy(msg, PCMK__XA_ST_REMOTE_OP);
cmd->client_name = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTNAME);
cmd->target = crm_element_value_copy(op, PCMK__XA_ST_TARGET);
cmd->device = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ID);
cmd->done_cb = st_child_done;
// Track in global command list
cmd_list = g_list_append(cmd_list, cmd);
return cmd;
}
static int
get_action_limit(stonith_device_t * device)
{
const char *value = NULL;
int action_limit = 1;
value = g_hash_table_lookup(device->params, PCMK_STONITH_ACTION_LIMIT);
if ((value == NULL)
|| (pcmk__scan_min_int(value, &action_limit, INT_MIN) != pcmk_rc_ok)
|| (action_limit == 0)) {
action_limit = 1;
}
return action_limit;
}
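/* A negative PCMK_STONITH_ACTION_LIMIT (for example -1) is returned as-is,
 * which the caller treats as "no limit"; 0 and unparsable values fall back
 * to the default of one concurrent action per device.
 */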
static int
get_active_cmds(stonith_device_t * device)
{
int counter = 0;
GList *gIter = NULL;
GList *gIterNext = NULL;
CRM_CHECK(device != NULL, return 0);
for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
async_command_t *cmd = gIter->data;
gIterNext = gIter->next;
if (cmd->active_on == device) {
counter++;
}
}
return counter;
}
static void
fork_cb(int pid, void *user_data)
{
async_command_t *cmd = (async_command_t *) user_data;
stonith_device_t * device =
/* in case of a retry we've done the move from
activating_on to active_on already
*/
cmd->activating_on?cmd->activating_on:cmd->active_on;
CRM_ASSERT(device);
crm_debug("Operation '%s' [%d]%s%s using %s now running with %ds timeout",
cmd->action, pid,
((cmd->target == NULL)? "" : " targeting "),
pcmk__s(cmd->target, ""), device->id, cmd->timeout);
cmd->active_on = device;
cmd->activating_on = NULL;
}
static int
get_agent_metadata_cb(gpointer data) {
stonith_device_t *device = data;
guint period_ms;
switch (get_agent_metadata(device->agent, &device->agent_metadata)) {
case pcmk_rc_ok:
if (device->agent_metadata) {
read_action_metadata(device);
stonith__device_parameter_flags(&(device->flags), device->id,
device->agent_metadata);
}
return G_SOURCE_REMOVE;
case EAGAIN:
period_ms = pcmk__mainloop_timer_get_period(device->timer);
if (period_ms < 160 * 1000) {
mainloop_timer_set_period(device->timer, 2 * period_ms);
}
return G_SOURCE_CONTINUE;
default:
return G_SOURCE_REMOVE;
}
}
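/* Retry behavior sketch: the metadata timer starts at 10s (see
 * build_device_from_xml() below) and its period is doubled on each EAGAIN,
 * so retries back off at roughly 10s, 20s, 40s, 80s and then stay at 160s.
 */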
/*!
* \internal
* \brief Call a command's action callback for an internal (not library) result
*
* \param[in,out] cmd Command to report result for
* \param[in] execution_status Execution status to use for result
* \param[in] exit_status Exit status to use for result
* \param[in] exit_reason Exit reason to use for result
*/
static void
report_internal_result(async_command_t *cmd, int exit_status,
int execution_status, const char *exit_reason)
{
pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
pcmk__set_result(&result, exit_status, execution_status, exit_reason);
cmd->done_cb(0, &result, cmd);
pcmk__reset_result(&result);
}
static gboolean
stonith_device_execute(stonith_device_t * device)
{
int exec_rc = 0;
const char *action_str = NULL;
const char *host_arg = NULL;
async_command_t *cmd = NULL;
stonith_action_t *action = NULL;
int active_cmds = 0;
int action_limit = 0;
GList *gIter = NULL;
GList *gIterNext = NULL;
CRM_CHECK(device != NULL, return FALSE);
active_cmds = get_active_cmds(device);
action_limit = get_action_limit(device);
if (action_limit > -1 && active_cmds >= action_limit) {
crm_trace("%s is over its action limit of %d (%u active action%s)",
device->id, action_limit, active_cmds,
pcmk__plural_s(active_cmds));
return TRUE;
}
for (gIter = device->pending_ops; gIter != NULL; gIter = gIterNext) {
async_command_t *pending_op = gIter->data;
gIterNext = gIter->next;
if (pending_op && pending_op->delay_id) {
crm_trace("Operation '%s'%s%s using %s was asked to run too early, "
"waiting for start delay of %ds",
pending_op->action,
((pending_op->target == NULL)? "" : " targeting "),
pcmk__s(pending_op->target, ""),
device->id, pending_op->start_delay);
continue;
}
device->pending_ops = g_list_remove_link(device->pending_ops, gIter);
g_list_free_1(gIter);
cmd = pending_op;
break;
}
if (cmd == NULL) {
crm_trace("No actions using %s are needed", device->id);
return TRUE;
}
if (pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT,
STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) {
if (pcmk__is_fencing_action(cmd->action)) {
if (node_does_watchdog_fencing(stonith_our_uname)) {
pcmk__panic(__func__);
goto done;
}
} else {
crm_info("Faking success for %s watchdog operation", cmd->action);
report_internal_result(cmd, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
goto done;
}
}
#if SUPPORT_CIBSECRETS
exec_rc = pcmk__substitute_secrets(device->id, device->params);
if (exec_rc != pcmk_rc_ok) {
if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) {
crm_info("Proceeding with stop operation for %s "
"despite being unable to load CIB secrets (%s)",
device->id, pcmk_rc_str(exec_rc));
} else {
crm_err("Considering %s unconfigured "
"because unable to load CIB secrets: %s",
device->id, pcmk_rc_str(exec_rc));
report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_SECRETS,
"Failed to get CIB secrets");
goto done;
}
}
#endif
action_str = cmd->action;
if (pcmk__str_eq(cmd->action, PCMK_ACTION_REBOOT, pcmk__str_none)
&& !pcmk_is_set(device->flags, st_device_supports_reboot)) {
crm_notice("Remapping 'reboot' action%s%s using %s to 'off' "
"because agent '%s' does not support reboot",
((cmd->target == NULL)? "" : " targeting "),
pcmk__s(cmd->target, ""), device->id, device->agent);
action_str = PCMK_ACTION_OFF;
}
if (pcmk_is_set(device->flags, st_device_supports_parameter_port)) {
host_arg = "port";
} else if (pcmk_is_set(device->flags, st_device_supports_parameter_plug)) {
host_arg = "plug";
}
action = stonith__action_create(device->agent, action_str, cmd->target,
cmd->target_nodeid, cmd->timeout,
device->params, device->aliases, host_arg);
/* For async execution, exec_rc is negative on an early error exit;
 * otherwise success/errors are handled via the callbacks */
cmd->activating_on = device;
exec_rc = stonith__execute_async(action, (void *)cmd, cmd->done_cb,
fork_cb);
if (exec_rc < 0) {
cmd->activating_on = NULL;
cmd->done_cb(0, stonith__action_result(action), cmd);
stonith__destroy_action(action);
}
done:
/* Device might get triggered to work by multiple fencing commands
* simultaneously. Trigger the device again to make sure any
* remaining concurrent commands get executed. */
if (device->pending_ops) {
mainloop_set_trigger(device->work);
}
return TRUE;
}
static gboolean
stonith_device_dispatch(gpointer user_data)
{
return stonith_device_execute(user_data);
}
static gboolean
start_delay_helper(gpointer data)
{
async_command_t *cmd = data;
stonith_device_t *device = cmd_device(cmd);
cmd->delay_id = 0;
if (device) {
mainloop_set_trigger(device->work);
}
return FALSE;
}
static void
schedule_stonith_command(async_command_t * cmd, stonith_device_t * device)
{
int delay_max = 0;
int delay_base = 0;
int requested_delay = cmd->start_delay;
CRM_CHECK(cmd != NULL, return);
CRM_CHECK(device != NULL, return);
if (cmd->device) {
free(cmd->device);
}
if (device->include_nodeid && (cmd->target != NULL)) {
crm_node_t *node = pcmk__get_node(0, cmd->target, NULL,
pcmk__node_search_cluster_member);
cmd->target_nodeid = node->id;
}
cmd->device = pcmk__str_copy(device->id);
cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout);
if (cmd->remote_op_id) {
crm_debug("Scheduling '%s' action%s%s using %s for remote peer %s "
"with op id %.8s and timeout %ds",
cmd->action,
(cmd->target == NULL)? "" : " targeting ",
pcmk__s(cmd->target, ""),
device->id, cmd->origin, cmd->remote_op_id, cmd->timeout);
} else {
crm_debug("Scheduling '%s' action%s%s using %s for %s with timeout %ds",
cmd->action,
(cmd->target == NULL)? "" : " targeting ",
pcmk__s(cmd->target, ""),
device->id, cmd->client, cmd->timeout);
}
device->pending_ops = g_list_append(device->pending_ops, cmd);
mainloop_set_trigger(device->work);
// Value -1 means disable any static/random fencing delays
if (requested_delay < 0) {
return;
}
delay_max = get_action_delay_max(device, cmd->action);
delay_base = get_action_delay_base(device, cmd->action, cmd->target);
if (delay_max == 0) {
delay_max = delay_base;
}
if (delay_max < delay_base) {
crm_warn(PCMK_STONITH_DELAY_BASE " (%ds) is larger than "
PCMK_STONITH_DELAY_MAX " (%ds) for %s using %s "
"(limiting to maximum delay)",
delay_base, delay_max, cmd->action, device->id);
delay_base = delay_max;
}
if (delay_max > 0) {
// coverity[dontcall] It doesn't matter here if rand() is predictable
cmd->start_delay +=
((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0)
+ delay_base;
}
if (cmd->start_delay > 0) {
crm_notice("Delaying '%s' action%s%s using %s for %ds " CRM_XS
" timeout=%ds requested_delay=%ds base=%ds max=%ds",
cmd->action,
(cmd->target == NULL)? "" : " targeting ",
pcmk__s(cmd->target, ""),
device->id, cmd->start_delay, cmd->timeout,
requested_delay, delay_base, delay_max);
cmd->delay_id =
g_timeout_add_seconds(cmd->start_delay, start_delay_helper, cmd);
}
}
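/* Delay example (illustrative values): with pcmk_delay_base="5s" and
 * pcmk_delay_max="15s", the code above adds 5 + rand() % 10 seconds on top of
 * any requested start delay, i.e. a random delay in [5, 15) seconds, before
 * the command is executed.
 */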
static void
free_device(gpointer data)
{
GList *gIter = NULL;
stonith_device_t *device = data;
g_hash_table_destroy(device->params);
g_hash_table_destroy(device->aliases);
for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) {
async_command_t *cmd = gIter->data;
crm_warn("Removal of device '%s' purged operation '%s'", device->id, cmd->action);
report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"Device was removed before action could be executed");
}
g_list_free(device->pending_ops);
g_list_free_full(device->targets, free);
if (device->timer) {
mainloop_timer_stop(device->timer);
mainloop_timer_del(device->timer);
}
mainloop_destroy_trigger(device->work);
free_xml(device->agent_metadata);
free(device->namespace);
if (device->on_target_actions != NULL) {
g_string_free(device->on_target_actions, TRUE);
}
free(device->agent);
free(device->id);
free(device);
}
void free_device_list(void)
{
if (device_list != NULL) {
g_hash_table_destroy(device_list);
device_list = NULL;
}
}
void
init_device_list(void)
{
if (device_list == NULL) {
device_list = pcmk__strkey_table(NULL, free_device);
}
}
static GHashTable *
build_port_aliases(const char *hostmap, GList ** targets)
{
char *name = NULL;
int last = 0, lpc = 0, max = 0, added = 0;
GHashTable *aliases = pcmk__strikey_table(free, free);
if (hostmap == NULL) {
return aliases;
}
max = strlen(hostmap);
for (; lpc <= max; lpc++) {
switch (hostmap[lpc]) {
/* Skip escaped chars */
case '\\':
lpc++;
break;
/* Assignment chars */
case '=':
case ':':
if (lpc > last) {
free(name);
name = pcmk__assert_alloc(1, 1 + lpc - last);
memcpy(name, hostmap + last, lpc - last);
}
last = lpc + 1;
break;
/* Delimiter chars */
/* case ',': Potentially used to specify multiple ports */
case 0:
case ';':
case ' ':
case '\t':
if (name) {
char *value = NULL;
int k = 0;
value = pcmk__assert_alloc(1, 1 + lpc - last);
memcpy(value, hostmap + last, lpc - last);
for (int i = 0; value[i] != '\0'; i++) {
if (value[i] != '\\') {
value[k++] = value[i];
}
}
value[k] = '\0';
crm_debug("Adding alias '%s'='%s'", name, value);
g_hash_table_replace(aliases, name, value);
if (targets) {
*targets = g_list_append(*targets, pcmk__str_copy(value));
}
value = NULL;
name = NULL;
added++;
} else if (lpc > last) {
crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last);
}
last = lpc + 1;
break;
}
if (hostmap[lpc] == 0) {
break;
}
}
if (added == 0) {
crm_info("No host mappings detected in '%s'", hostmap);
}
free(name);
return aliases;
}
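/* Example (hypothetical pcmk_host_map value): "node1:1;node2:2" maps node
 * name "node1" to port "1" and "node2" to port "2". Either ':' or '=' starts
 * a value, '\' escapes the following character, and ',' is not treated as a
 * delimiter, so a port value may itself contain commas.
 */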
GHashTable *metadata_cache = NULL;
void
free_metadata_cache(void) {
if (metadata_cache != NULL) {
g_hash_table_destroy(metadata_cache);
metadata_cache = NULL;
}
}
static void
init_metadata_cache(void) {
if (metadata_cache == NULL) {
metadata_cache = pcmk__strkey_table(free, free);
}
}
int
get_agent_metadata(const char *agent, xmlNode ** metadata)
{
char *buffer = NULL;
if (metadata == NULL) {
return EINVAL;
}
*metadata = NULL;
if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT_INTERNAL, pcmk__str_none)) {
return pcmk_rc_ok;
}
init_metadata_cache();
buffer = g_hash_table_lookup(metadata_cache, agent);
if (buffer == NULL) {
stonith_t *st = stonith_api_new();
int rc;
if (st == NULL) {
crm_warn("Could not get agent meta-data: "
"API memory allocation failed");
return EAGAIN;
}
rc = st->cmds->metadata(st, st_opt_sync_call, agent,
NULL, &buffer, 10);
stonith_api_delete(st);
if (rc || !buffer) {
crm_err("Could not retrieve metadata for fencing agent %s", agent);
return EAGAIN;
}
g_hash_table_replace(metadata_cache, pcmk__str_copy(agent), buffer);
}
*metadata = pcmk__xml_parse(buffer);
return pcmk_rc_ok;
}
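/* Metadata is fetched synchronously at most once per agent and cached as a
 * raw XML string in metadata_cache; failures return EAGAIN so callers can
 * retry later (see the retry timer set up in build_device_from_xml()).
 */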
static gboolean
is_nodeid_required(xmlNode * xml)
{
xmlXPathObjectPtr xpath = NULL;
if (stand_alone) {
return FALSE;
}
if (!xml) {
return FALSE;
}
xpath = xpath_search(xml,
"//" PCMK_XE_PARAMETER "[@" PCMK_XA_NAME "='nodeid']");
if (numXpathResults(xpath) <= 0) {
freeXpathObject(xpath);
return FALSE;
}
freeXpathObject(xpath);
return TRUE;
}
static void
read_action_metadata(stonith_device_t *device)
{
xmlXPathObjectPtr xpath = NULL;
int max = 0;
int lpc = 0;
if (device->agent_metadata == NULL) {
return;
}
xpath = xpath_search(device->agent_metadata, "//action");
max = numXpathResults(xpath);
if (max <= 0) {
freeXpathObject(xpath);
return;
}
for (lpc = 0; lpc < max; lpc++) {
const char *action = NULL;
xmlNode *match = getXpathResult(xpath, lpc);
CRM_LOG_ASSERT(match != NULL);
if(match == NULL) { continue; };
action = crm_element_value(match, PCMK_XA_NAME);
if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) {
stonith__set_device_flags(device->flags, device->id,
st_device_supports_list);
} else if (pcmk__str_eq(action, PCMK_ACTION_STATUS, pcmk__str_none)) {
stonith__set_device_flags(device->flags, device->id,
st_device_supports_status);
} else if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
stonith__set_device_flags(device->flags, device->id,
st_device_supports_reboot);
} else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
/* PCMK_XA_AUTOMATIC means the cluster will unfence a node when it
* joins.
*
* @COMPAT PCMK__XA_REQUIRED is a deprecated synonym for
* PCMK_XA_AUTOMATIC.
*/
if (pcmk__xe_attr_is_true(match, PCMK_XA_AUTOMATIC)
|| pcmk__xe_attr_is_true(match, PCMK__XA_REQUIRED)) {
device->automatic_unfencing = TRUE;
}
stonith__set_device_flags(device->flags, device->id,
st_device_supports_on);
}
if ((action != NULL)
&& pcmk__xe_attr_is_true(match, PCMK_XA_ON_TARGET)) {
pcmk__add_word(&(device->on_target_actions), 64, action);
}
}
freeXpathObject(xpath);
}
/*!
* \internal
* \brief Set a pcmk_*_action parameter if not already set
*
* \param[in,out] params Device parameters
* \param[in] action Name of action
* \param[in] value Value to use if action is not already set
*/
static void
map_action(GHashTable *params, const char *action, const char *value)
{
char *key = crm_strdup_printf("pcmk_%s_action", action);
if (g_hash_table_lookup(params, key)) {
crm_warn("Ignoring %s='%s', see %s instead",
STONITH_ATTR_ACTION_OP, value, key);
free(key);
} else {
crm_warn("Mapping %s='%s' to %s='%s'",
STONITH_ATTR_ACTION_OP, value, key, value);
g_hash_table_insert(params, key, pcmk__str_copy(value));
}
}
/*!
* \internal
* \brief Create device parameter table from XML
*
* \param[in] name Device name (used for logging only)
* \param[in] dev XML containing device parameters
*/
static GHashTable *
xml2device_params(const char *name, const xmlNode *dev)
{
GHashTable *params = xml2list(dev);
const char *value;
/* Action should never be specified in the device configuration,
* but we support it for users who are familiar with other software
* that worked that way.
*/
value = g_hash_table_lookup(params, STONITH_ATTR_ACTION_OP);
if (value != NULL) {
crm_warn("%s has '%s' parameter, which should never be specified in configuration",
name, STONITH_ATTR_ACTION_OP);
if (*value == '\0') {
crm_warn("Ignoring empty '%s' parameter", STONITH_ATTR_ACTION_OP);
} else if (strcmp(value, PCMK_ACTION_REBOOT) == 0) {
crm_warn("Ignoring %s='reboot' (see " PCMK_OPT_STONITH_ACTION
" cluster property instead)",
STONITH_ATTR_ACTION_OP);
} else if (strcmp(value, PCMK_ACTION_OFF) == 0) {
map_action(params, PCMK_ACTION_REBOOT, value);
} else {
map_action(params, PCMK_ACTION_OFF, value);
map_action(params, PCMK_ACTION_REBOOT, value);
}
g_hash_table_remove(params, STONITH_ATTR_ACTION_OP);
}
return params;
}
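/* Mapping example (illustrative): action="off" becomes pcmk_reboot_action="off"
 * (so reboots are remapped to off), action="reboot" is ignored in favor of the
 * stonith-action cluster property, and any other value is copied to both
 * pcmk_off_action and pcmk_reboot_action unless those are already set.
 */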
static const char *
target_list_type(stonith_device_t * dev)
{
const char *check_type = NULL;
check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK);
if (check_type == NULL) {
if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) {
check_type = PCMK_VALUE_STATIC_LIST;
} else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) {
check_type = PCMK_VALUE_STATIC_LIST;
} else if (pcmk_is_set(dev->flags, st_device_supports_list)) {
check_type = PCMK_VALUE_DYNAMIC_LIST;
} else if (pcmk_is_set(dev->flags, st_device_supports_status)) {
check_type = PCMK_VALUE_STATUS;
} else {
check_type = PCMK_VALUE_NONE;
}
}
return check_type;
}
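/* Fallback order when pcmk_host_check is unset: static-list if a host list
 * or host map was configured, otherwise dynamic-list if the agent advertises
 * a "list" action, otherwise "status" if it advertises "status", else none.
 */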
static stonith_device_t *
build_device_from_xml(xmlNode *dev)
{
const char *value;
stonith_device_t *device = NULL;
char *agent = crm_element_value_copy(dev, PCMK_XA_AGENT);
CRM_CHECK(agent != NULL, return device);
device = pcmk__assert_alloc(1, sizeof(stonith_device_t));
device->id = crm_element_value_copy(dev, PCMK_XA_ID);
device->agent = agent;
device->namespace = crm_element_value_copy(dev, PCMK__XA_NAMESPACE);
device->params = xml2device_params(device->id, dev);
value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_LIST);
if (value) {
device->targets = stonith__parse_targets(value);
}
value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP);
device->aliases = build_port_aliases(value, &(device->targets));
value = target_list_type(device);
if (!pcmk__str_eq(value, PCMK_VALUE_STATIC_LIST, pcmk__str_casei)
&& (device->targets != NULL)) {
// device->targets is necessary only with PCMK_VALUE_STATIC_LIST
g_list_free_full(device->targets, free);
device->targets = NULL;
}
switch (get_agent_metadata(device->agent, &device->agent_metadata)) {
case pcmk_rc_ok:
if (device->agent_metadata) {
read_action_metadata(device);
stonith__device_parameter_flags(&(device->flags), device->id,
device->agent_metadata);
}
break;
case EAGAIN:
if (device->timer == NULL) {
device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000,
TRUE, get_agent_metadata_cb, device);
}
if (!mainloop_timer_running(device->timer)) {
mainloop_timer_start(device->timer);
}
break;
default:
break;
}
value = g_hash_table_lookup(device->params, "nodeid");
if (!value) {
device->include_nodeid = is_nodeid_required(device->agent_metadata);
}
value = crm_element_value(dev, PCMK__XA_RSC_PROVIDES);
if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) {
device->automatic_unfencing = TRUE;
}
if (is_action_required(PCMK_ACTION_ON, device)) {
crm_info("Fencing device '%s' requires unfencing", device->id);
}
if (device->on_target_actions != NULL) {
crm_info("Fencing device '%s' requires actions (%s) to be executed "
"on target", device->id,
(const char *) device->on_target_actions->str);
}
device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
/* TODO: Hook up priority */
return device;
}
static void
schedule_internal_command(const char *origin,
stonith_device_t * device,
const char *action,
const char *target,
int timeout,
void *internal_user_data,
void (*done_cb) (int pid,
const pcmk__action_result_t *result,
void *user_data))
{
async_command_t *cmd = NULL;
cmd = pcmk__assert_alloc(1, sizeof(async_command_t));
cmd->id = -1;
cmd->default_timeout = timeout ? timeout : 60;
cmd->timeout = cmd->default_timeout;
cmd->action = pcmk__str_copy(action);
cmd->target = pcmk__str_copy(target);
cmd->device = pcmk__str_copy(device->id);
cmd->origin = pcmk__str_copy(origin);
cmd->client = pcmk__str_copy(crm_system_name);
cmd->client_name = pcmk__str_copy(crm_system_name);
cmd->internal_user_data = internal_user_data;
cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */
schedule_stonith_command(cmd, device);
}
// Fence agent status commands use custom exit status codes
enum fence_status_code {
fence_status_invalid = -1,
fence_status_active = 0,
fence_status_unknown = 1,
fence_status_inactive = 2,
};
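/* These mirror the exit codes a fence agent's "status" action is expected to
 * return: 0 (active) and 2 (inactive) both mean the device can reach the
 * target, while 1 means the target is unknown to the device (see
 * status_search_cb() below).
 */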
static void
status_search_cb(int pid, const pcmk__action_result_t *result, void *user_data)
{
async_command_t *cmd = user_data;
struct device_search_s *search = cmd->internal_user_data;
stonith_device_t *dev = cmd_device(cmd);
gboolean can = FALSE;
free_async_command(cmd);
if (!dev) {
search_devices_record_result(search, NULL, FALSE);
return;
}
mainloop_set_trigger(dev->work);
if (result->execution_status != PCMK_EXEC_DONE) {
crm_warn("Assuming %s cannot fence %s "
"because status could not be executed: %s%s%s%s",
dev->id, search->host,
pcmk_exec_status_str(result->execution_status),
((result->exit_reason == NULL)? "" : " ("),
((result->exit_reason == NULL)? "" : result->exit_reason),
((result->exit_reason == NULL)? "" : ")"));
search_devices_record_result(search, dev->id, FALSE);
return;
}
switch (result->exit_status) {
case fence_status_unknown:
crm_trace("%s reported it cannot fence %s", dev->id, search->host);
break;
case fence_status_active:
case fence_status_inactive:
crm_trace("%s reported it can fence %s", dev->id, search->host);
can = TRUE;
break;
default:
crm_warn("Assuming %s cannot fence %s "
"(status returned unknown code %d)",
dev->id, search->host, result->exit_status);
break;
}
search_devices_record_result(search, dev->id, can);
}
static void
dynamic_list_search_cb(int pid, const pcmk__action_result_t *result,
void *user_data)
{
async_command_t *cmd = user_data;
struct device_search_s *search = cmd->internal_user_data;
stonith_device_t *dev = cmd_device(cmd);
gboolean can_fence = FALSE;
free_async_command(cmd);
/* Host/alias must be in the list output to be eligible to be fenced
*
* Will cause problems if down'd nodes aren't listed or (for virtual nodes)
* if the guest is still listed despite being moved to another machine
*/
if (!dev) {
search_devices_record_result(search, NULL, FALSE);
return;
}
mainloop_set_trigger(dev->work);
if (pcmk__result_ok(result)) {
crm_info("Refreshing target list for %s", dev->id);
g_list_free_full(dev->targets, free);
dev->targets = stonith__parse_targets(result->action_stdout);
dev->targets_age = time(NULL);
} else if (dev->targets != NULL) {
if (result->execution_status == PCMK_EXEC_DONE) {
crm_info("Reusing most recent target list for %s "
"because list returned error code %d",
dev->id, result->exit_status);
} else {
crm_info("Reusing most recent target list for %s "
"because list could not be executed: %s%s%s%s",
dev->id, pcmk_exec_status_str(result->execution_status),
((result->exit_reason == NULL)? "" : " ("),
((result->exit_reason == NULL)? "" : result->exit_reason),
((result->exit_reason == NULL)? "" : ")"));
}
} else { // We have never successfully executed list
if (result->execution_status == PCMK_EXEC_DONE) {
crm_warn("Assuming %s cannot fence %s "
"because list returned error code %d",
dev->id, search->host, result->exit_status);
} else {
crm_warn("Assuming %s cannot fence %s "
"because list could not be executed: %s%s%s%s",
dev->id, search->host,
pcmk_exec_status_str(result->execution_status),
((result->exit_reason == NULL)? "" : " ("),
((result->exit_reason == NULL)? "" : result->exit_reason),
((result->exit_reason == NULL)? "" : ")"));
}
/* Fall back to pcmk_host_check=PCMK_VALUE_STATUS if the user didn't
* explicitly specify PCMK_VALUE_DYNAMIC_LIST
*/
if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK) == NULL) {
crm_notice("Switching to pcmk_host_check='status' for %s", dev->id);
pcmk__insert_dup(dev->params, PCMK_STONITH_HOST_CHECK,
PCMK_VALUE_STATUS);
}
}
if (dev->targets) {
const char *alias = g_hash_table_lookup(dev->aliases, search->host);
if (!alias) {
alias = search->host;
}
if (pcmk__str_in_list(alias, dev->targets, pcmk__str_casei)) {
can_fence = TRUE;
}
}
search_devices_record_result(search, dev->id, can_fence);
}
/*!
* \internal
* \brief Check whether any key in first is missing from second or has a different value
*/
static int
device_params_diff(GHashTable *first, GHashTable *second) {
char *key = NULL;
char *value = NULL;
GHashTableIter gIter;
g_hash_table_iter_init(&gIter, first);
while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) {
if(strstr(key, "CRM_meta") == key) {
continue;
} else if (strcmp(key, PCMK_XA_CRM_FEATURE_SET) == 0) {
continue;
} else {
char *other_value = g_hash_table_lookup(second, key);
if (!other_value || !pcmk__str_eq(other_value, value, pcmk__str_casei)) {
crm_trace("Different value for %s: %s != %s", key, other_value, value);
return 1;
}
}
}
return 0;
}
/*!
* \internal
* \brief Check whether an identical device already exists in the device list
*/
static stonith_device_t *
device_has_duplicate(const stonith_device_t *device)
{
stonith_device_t *dup = g_hash_table_lookup(device_list, device->id);
if (!dup) {
crm_trace("No match for %s", device->id);
return NULL;
} else if (!pcmk__str_eq(dup->agent, device->agent, pcmk__str_casei)) {
crm_trace("Different agent: %s != %s", dup->agent, device->agent);
return NULL;
}
/* Use calculate_operation_digest() here? */
if (device_params_diff(device->params, dup->params) ||
device_params_diff(dup->params, device->params)) {
return NULL;
}
crm_trace("Match");
return dup;
}
int
stonith_device_register(xmlNode *dev, gboolean from_cib)
{
stonith_device_t *dup = NULL;
stonith_device_t *device = build_device_from_xml(dev);
guint ndevices = 0;
int rv = pcmk_ok;
CRM_CHECK(device != NULL, return -ENOMEM);
/* do we have a watchdog-device? */
if (pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none) ||
pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT,
STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) do {
if (stonith_watchdog_timeout_ms <= 0) {
crm_err("Ignoring watchdog fence device without "
PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " set.");
rv = -ENODEV;
/* fall through to cleanup & return */
} else if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT,
STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) {
crm_err("Ignoring watchdog fence device with unknown "
"agent '%s' unequal '" STONITH_WATCHDOG_AGENT "'.",
device->agent?device->agent:"");
rv = -ENODEV;
/* fall through to cleanup & return */
} else if (!pcmk__str_eq(device->id, STONITH_WATCHDOG_ID,
pcmk__str_none)) {
crm_err("Ignoring watchdog fence device "
"named %s !='"STONITH_WATCHDOG_ID"'.",
device->id?device->id:"");
rv = -ENODEV;
/* fall through to cleanup & return */
} else {
if (pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT,
pcmk__str_none)) {
/* this either has an empty list or the targets
configured for watchdog-fencing
*/
g_list_free_full(stonith_watchdog_targets, free);
stonith_watchdog_targets = device->targets;
device->targets = NULL;
}
if (node_does_watchdog_fencing(stonith_our_uname)) {
g_list_free_full(device->targets, free);
device->targets = stonith__parse_targets(stonith_our_uname);
pcmk__insert_dup(device->params,
PCMK_STONITH_HOST_LIST, stonith_our_uname);
/* proceed as with any other stonith-device */
break;
}
crm_debug("Skip registration of watchdog fence device on node not in host-list.");
/* cleanup and fall through to more cleanup and return */
device->targets = NULL;
stonith_device_remove(device->id, from_cib);
}
free_device(device);
return rv;
} while (0);
dup = device_has_duplicate(device);
if (dup) {
ndevices = g_hash_table_size(device_list);
crm_debug("Device '%s' already in device list (%d active device%s)",
device->id, ndevices, pcmk__plural_s(ndevices));
free_device(device);
device = dup;
dup = g_hash_table_lookup(device_list, device->id);
dup->dirty = FALSE;
} else {
stonith_device_t *old = g_hash_table_lookup(device_list, device->id);
if (from_cib && old && old->api_registered) {
/* If the cib is writing over an entry that is shared with a stonith client,
* copy any pending ops that currently exist on the old entry to the new one.
* Otherwise the pending ops will be reported as failures
*/
crm_info("Overwriting existing entry for %s from CIB", device->id);
device->pending_ops = old->pending_ops;
device->api_registered = TRUE;
old->pending_ops = NULL;
if (device->pending_ops) {
mainloop_set_trigger(device->work);
}
}
g_hash_table_replace(device_list, device->id, device);
ndevices = g_hash_table_size(device_list);
crm_notice("Added '%s' to device list (%d active device%s)",
device->id, ndevices, pcmk__plural_s(ndevices));
}
if (from_cib) {
device->cib_registered = TRUE;
} else {
device->api_registered = TRUE;
}
return pcmk_ok;
}
void
stonith_device_remove(const char *id, bool from_cib)
{
stonith_device_t *device = g_hash_table_lookup(device_list, id);
guint ndevices = 0;
if (!device) {
ndevices = g_hash_table_size(device_list);
crm_info("Device '%s' not found (%d active device%s)",
id, ndevices, pcmk__plural_s(ndevices));
return;
}
if (from_cib) {
device->cib_registered = FALSE;
} else {
device->verified = FALSE;
device->api_registered = FALSE;
}
if (!device->cib_registered && !device->api_registered) {
g_hash_table_remove(device_list, id);
ndevices = g_hash_table_size(device_list);
crm_info("Removed '%s' from device list (%d active device%s)",
id, ndevices, pcmk__plural_s(ndevices));
} else {
crm_trace("Not removing '%s' from device list (%d active) because "
"still registered via:%s%s",
id, g_hash_table_size(device_list),
(device->cib_registered? " cib" : ""),
(device->api_registered? " api" : ""));
}
}
/*!
* \internal
* \brief Return the number of stonith levels registered for a node
*
* \param[in] tp Node's topology table entry
*
* \return Number of non-NULL levels in topology entry
* \note This function is used only for log messages.
*/
static int
count_active_levels(const stonith_topology_t *tp)
{
int lpc = 0;
int count = 0;
- for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
+ for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) {
if (tp->levels[lpc] != NULL) {
count++;
}
}
return count;
}
static void
free_topology_entry(gpointer data)
{
stonith_topology_t *tp = data;
int lpc = 0;
- for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
+ for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) {
if (tp->levels[lpc] != NULL) {
g_list_free_full(tp->levels[lpc], free);
}
}
free(tp->target);
free(tp->target_value);
free(tp->target_pattern);
free(tp->target_attribute);
free(tp);
}
void
free_topology_list(void)
{
if (topology != NULL) {
g_hash_table_destroy(topology);
topology = NULL;
}
}
void
init_topology_list(void)
{
if (topology == NULL) {
topology = pcmk__strkey_table(NULL, free_topology_entry);
}
}
char *
stonith_level_key(const xmlNode *level, enum fenced_target_by mode)
{
if (mode == fenced_target_by_unknown) {
mode = unpack_level_kind(level);
}
switch (mode) {
case fenced_target_by_name:
return crm_element_value_copy(level, PCMK_XA_TARGET);
case fenced_target_by_pattern:
return crm_element_value_copy(level, PCMK_XA_TARGET_PATTERN);
case fenced_target_by_attribute:
return crm_strdup_printf("%s=%s",
crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE),
crm_element_value(level, PCMK_XA_TARGET_VALUE));
default:
return crm_strdup_printf("unknown-%s", pcmk__xe_id(level));
}
}
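/* Key format example (illustrative): a level targeting by node attribute
 * "rack" with value "1" is keyed as "rack=1", while name- and pattern-based
 * levels use the name or pattern string itself as the key.
 */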
/*!
* \internal
* \brief Parse target identification from topology level XML
*
* \param[in] level Topology level XML to parse
*
* \return How to identify target of \p level
*/
static enum fenced_target_by
unpack_level_kind(const xmlNode *level)
{
if (crm_element_value(level, PCMK_XA_TARGET) != NULL) {
return fenced_target_by_name;
}
if (crm_element_value(level, PCMK_XA_TARGET_PATTERN) != NULL) {
return fenced_target_by_pattern;
}
if (!stand_alone /* if standalone, there's no attribute manager */
&& (crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE) != NULL)
&& (crm_element_value(level, PCMK_XA_TARGET_VALUE) != NULL)) {
return fenced_target_by_attribute;
}
return fenced_target_by_unknown;
}
static stonith_key_value_t *
parse_device_list(const char *devices)
{
int lpc = 0;
int max = 0;
int last = 0;
stonith_key_value_t *output = NULL;
if (devices == NULL) {
return output;
}
max = strlen(devices);
for (lpc = 0; lpc <= max; lpc++) {
if (devices[lpc] == ',' || devices[lpc] == 0) {
char *line = strndup(devices + last, lpc - last);
output = stonith_key_value_add(output, NULL, line);
free(line);
last = lpc + 1;
}
}
return output;
}
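/* Example (illustrative): a devices value of "disk_fencing,ipmi" yields a
 * two-entry list with values "disk_fencing" and "ipmi".
 */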
/*!
* \internal
* \brief Unpack essential information from topology request XML
*
* \param[in] xml Request XML to search
* \param[out] mode If not NULL, where to store level kind
* \param[out] target If not NULL, where to store representation of target
* \param[out] id If not NULL, where to store level number
* \param[out] desc If not NULL, where to store log-friendly level description
*
* \return Topology level XML from within \p xml, or NULL if not found
* \note The caller is responsible for freeing \p *target and \p *desc if set.
*/
static xmlNode *
unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target,
int *id, char **desc)
{
enum fenced_target_by local_mode = fenced_target_by_unknown;
char *local_target = NULL;
int local_id = 0;
/* The level element can be the top element or lower. If top level, don't
* search by xpath, because it might give multiple hits if the XML is the
* CIB.
*/
if ((xml != NULL) && !pcmk__xe_is(xml, PCMK_XE_FENCING_LEVEL)) {
xml = get_xpath_object("//" PCMK_XE_FENCING_LEVEL, xml, LOG_WARNING);
}
if (xml == NULL) {
if (desc != NULL) {
*desc = crm_strdup_printf("missing");
}
} else {
local_mode = unpack_level_kind(xml);
local_target = stonith_level_key(xml, local_mode);
crm_element_value_int(xml, PCMK_XA_INDEX, &local_id);
if (desc != NULL) {
*desc = crm_strdup_printf("%s[%d]", local_target, local_id);
}
}
if (mode != NULL) {
*mode = local_mode;
}
if (id != NULL) {
*id = local_id;
}
if (target != NULL) {
*target = local_target;
} else {
free(local_target);
}
return xml;
}
/*!
* \internal
* \brief Register a fencing topology level for a target
*
* Given an XML request specifying the target name, level index, and device IDs
* for the level, this will create an entry for the target in the global topology
* table if one does not already exist, then append the specified device IDs to
* the entry's device list for the specified level.
*
* \param[in] msg XML request for STONITH level registration
* \param[out] desc If not NULL, set to string representation "TARGET[LEVEL]"
* \param[out] result Where to set result of registration
*/
void
fenced_register_level(xmlNode *msg, char **desc, pcmk__action_result_t *result)
{
int id = 0;
xmlNode *level;
enum fenced_target_by mode;
char *target;
stonith_topology_t *tp;
stonith_key_value_t *dIter = NULL;
stonith_key_value_t *devices = NULL;
CRM_CHECK((msg != NULL) && (result != NULL), return);
level = unpack_level_request(msg, &mode, &target, &id, desc);
if (level == NULL) {
fenced_set_protocol_error(result);
return;
}
// Ensure an ID was given (even the client API adds an ID)
if (pcmk__str_empty(pcmk__xe_id(level))) {
crm_warn("Ignoring registration for topology level without ID");
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Topology level is invalid without ID");
return;
}
// Ensure a valid target was specified
if (mode == fenced_target_by_unknown) {
crm_warn("Ignoring registration for topology level '%s' "
"without valid target", pcmk__xe_id(level));
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Invalid target for topology level '%s'",
pcmk__xe_id(level));
return;
}
// Ensure level ID is in allowed range
- if ((id <= 0) || (id >= ST_LEVEL_MAX)) {
+ if ((id < ST__LEVEL_MIN) || (id > ST__LEVEL_MAX)) {
crm_warn("Ignoring topology registration for %s with invalid level %d",
target, id);
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Invalid level number '%s' for topology level '%s'",
pcmk__s(crm_element_value(level, PCMK_XA_INDEX),
""),
pcmk__xe_id(level));
return;
}
/* Find or create topology table entry */
tp = g_hash_table_lookup(topology, target);
if (tp == NULL) {
tp = pcmk__assert_alloc(1, sizeof(stonith_topology_t));
tp->kind = mode;
tp->target = target;
tp->target_value = crm_element_value_copy(level, PCMK_XA_TARGET_VALUE);
tp->target_pattern = crm_element_value_copy(level,
PCMK_XA_TARGET_PATTERN);
tp->target_attribute = crm_element_value_copy(level,
PCMK_XA_TARGET_ATTRIBUTE);
g_hash_table_replace(topology, tp->target, tp);
crm_trace("Added %s (%d) to the topology (%d active entries)",
target, (int) mode, g_hash_table_size(topology));
} else {
free(target);
}
if (tp->levels[id] != NULL) {
crm_info("Adding to the existing %s[%d] topology entry",
tp->target, id);
}
devices = parse_device_list(crm_element_value(level, PCMK_XA_DEVICES));
for (dIter = devices; dIter; dIter = dIter->next) {
const char *device = dIter->value;
crm_trace("Adding device '%s' for %s[%d]", device, tp->target, id);
tp->levels[id] = g_list_append(tp->levels[id], pcmk__str_copy(device));
}
stonith_key_value_freeall(devices, 1, 1);
{
int nlevels = count_active_levels(tp);
crm_info("Target %s has %d active fencing level%s",
tp->target, nlevels, pcmk__plural_s(nlevels));
}
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
}
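/* Illustrative request payload (IDs and device names are assumptions):
 *   <fencing-level id="fl-node1-1" target="node1" index="1"
 *                  devices="ipmi_dev,disk_dev"/>
 * registers devices "ipmi_dev" and "disk_dev" at level 1 for target node1.
 */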
/*!
* \internal
* \brief Unregister a fencing topology level for a target
*
* Given an XML request specifying the target name and level index (or 0 for all
* levels), this will remove any corresponding entry for the target from the
* global topology table.
*
* \param[in] msg XML request for STONITH level registration
* \param[out] desc If not NULL, set to string representation "TARGET[LEVEL]"
* \param[out] result Where to set result of unregistration
*/
void
fenced_unregister_level(xmlNode *msg, char **desc,
pcmk__action_result_t *result)
{
int id = -1;
stonith_topology_t *tp;
char *target;
xmlNode *level = NULL;
CRM_CHECK(result != NULL, return);
level = unpack_level_request(msg, NULL, &target, &id, desc);
if (level == NULL) {
fenced_set_protocol_error(result);
return;
}
// Ensure level ID is in allowed range
- if ((id < 0) || (id >= ST_LEVEL_MAX)) {
+ if ((id < 0) || (id >= ST__LEVEL_COUNT)) {
crm_warn("Ignoring topology unregistration for %s with invalid level %d",
target, id);
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Invalid level number '%s' for topology level %s",
pcmk__s(crm_element_value(level, PCMK_XA_INDEX),
"<null>"),
// Client API doesn't add ID to unregistration XML
pcmk__s(pcmk__xe_id(level), ""));
return;
}
tp = g_hash_table_lookup(topology, target);
if (tp == NULL) {
guint nentries = g_hash_table_size(topology);
crm_info("No fencing topology found for %s (%d active %s)",
target, nentries,
pcmk__plural_alt(nentries, "entry", "entries"));
} else if (id == 0 && g_hash_table_remove(topology, target)) {
guint nentries = g_hash_table_size(topology);
crm_info("Removed all fencing topology entries related to %s "
"(%d active %s remaining)", target, nentries,
pcmk__plural_alt(nentries, "entry", "entries"));
} else if (tp->levels[id] != NULL) {
guint nlevels;
g_list_free_full(tp->levels[id], free);
tp->levels[id] = NULL;
nlevels = count_active_levels(tp);
crm_info("Removed level %d from fencing topology for %s "
"(%d active level%s remaining)",
id, target, nlevels, pcmk__plural_s(nlevels));
}
free(target);
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
}
static char *
list_to_string(GList *list, const char *delim, gboolean terminate_with_delim)
{
int max = g_list_length(list);
size_t delim_len = delim?strlen(delim):0;
size_t alloc_size = 1 + (max?((max-1+(terminate_with_delim?1:0))*delim_len):0);
char *rv;
GList *gIter;
char *pos = NULL;
const char *lead_delim = "";
for (gIter = list; gIter != NULL; gIter = gIter->next) {
const char *value = (const char *) gIter->data;
alloc_size += strlen(value);
}
rv = pcmk__assert_alloc(alloc_size, sizeof(char));
pos = rv;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
const char *value = (const char *) gIter->data;
pos = &pos[sprintf(pos, "%s%s", lead_delim, value)];
lead_delim = delim;
}
if (max && terminate_with_delim) {
sprintf(pos, "%s", delim);
}
return rv;
}
/*!
* \internal
* \brief Execute a fence agent action directly (and asynchronously)
*
* Handle a STONITH_OP_EXEC API message by scheduling a requested agent action
* directly on a specified device. Only list, monitor, and status actions are
* expected to use this call, though it should work with any agent command.
*
* \param[in] msg Request XML specifying action
* \param[out] result Where to store result of action
*
* \note If the action is monitor, the device must be registered via the API
* (CIB registration is not sufficient), because monitor should not be
* possible unless the device is "started" (API registered).
*/
static void
execute_agent_action(xmlNode *msg, pcmk__action_result_t *result)
{
xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, msg, LOG_ERR);
xmlNode *op = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, msg,
LOG_ERR);
const char *id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
const char *action = crm_element_value(op, PCMK__XA_ST_DEVICE_ACTION);
async_command_t *cmd = NULL;
stonith_device_t *device = NULL;
if ((id == NULL) || (action == NULL)) {
crm_info("Malformed API action request: device %s, action %s",
(id? id : "not specified"),
(action? action : "not specified"));
fenced_set_protocol_error(result);
return;
}
if (pcmk__str_eq(id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
// Watchdog agent actions are implemented internally
if (stonith_watchdog_timeout_ms <= 0) {
pcmk__set_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"Watchdog fence device not configured");
return;
} else if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) {
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
pcmk__set_result_output(result,
list_to_string(stonith_watchdog_targets,
"\n", TRUE),
NULL);
return;
} else if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_none)) {
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return;
}
}
device = g_hash_table_lookup(device_list, id);
if (device == NULL) {
crm_info("Ignoring API '%s' action request because device %s not found",
action, id);
pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"'%s' not found", id);
return;
} else if (!device->api_registered
&& (strcmp(action, PCMK_ACTION_MONITOR) == 0)) {
// Monitors may run only on "started" (API-registered) devices
crm_info("Ignoring API '%s' action request because device %s not active",
action, id);
pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"'%s' not active", id);
return;
}
cmd = create_async_command(msg);
if (cmd == NULL) {
crm_log_xml_warn(msg, "invalid");
fenced_set_protocol_error(result);
return;
}
schedule_stonith_command(cmd, device);
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL);
}
static void
search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence)
{
search->replies_received++;
if (can_fence && device) {
if (search->support_action_only != st_device_supports_none) {
stonith_device_t *dev = g_hash_table_lookup(device_list, device);
if (dev && !pcmk_is_set(dev->flags, search->support_action_only)) {
return;
}
}
search->capable = g_list_append(search->capable,
pcmk__str_copy(device));
}
if (search->replies_needed == search->replies_received) {
guint ndevices = g_list_length(search->capable);
crm_debug("Search found %d device%s that can perform '%s' targeting %s",
ndevices, pcmk__plural_s(ndevices),
(search->action? search->action : "unknown action"),
(search->host? search->host : "any node"));
search->callback(search->capable, search->user_data);
free(search->host);
free(search->action);
free(search);
}
}
/*!
* \internal
* \brief Check whether the local host is allowed to execute a fencing action
*
* \param[in] device Fence device to check
* \param[in] action Fence action to check
* \param[in] target Hostname of fence target
* \param[in] allow_suicide Whether self-fencing is allowed for this operation
*
* \return TRUE if local host is allowed to execute action, FALSE otherwise
*/
static gboolean
localhost_is_eligible(const stonith_device_t *device, const char *action,
const char *target, gboolean allow_suicide)
{
gboolean localhost_is_target = pcmk__str_eq(target, stonith_our_uname,
pcmk__str_casei);
if ((device != NULL) && (action != NULL)
&& (device->on_target_actions != NULL)
&& (strstr((const char*) device->on_target_actions->str,
action) != NULL)) {
if (!localhost_is_target) {
crm_trace("Operation '%s' using %s can only be executed for local "
"host, not %s", action, device->id, target);
return FALSE;
}
} else if (localhost_is_target && !allow_suicide) {
crm_trace("'%s' operation does not support self-fencing", action);
return FALSE;
}
return TRUE;
}
/*!
* \internal
* \brief Check if local node is allowed to execute (possibly remapped) action
*
* \param[in] device Fence device to check
* \param[in] action Fence action to check
* \param[in] target Node name of fence target
* \param[in] allow_self Whether self-fencing is allowed for this operation
*
* \return true if local node is allowed to execute \p action or any actions it
* might be remapped to, otherwise false
*/
static bool
localhost_is_eligible_with_remap(const stonith_device_t *device,
const char *action, const char *target,
gboolean allow_self)
{
// Check exact action
if (localhost_is_eligible(device, action, target, allow_self)) {
return true;
}
// Check potential remaps
if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* "reboot" might get remapped to "off" then "on", so even if reboot is
* disallowed, return true if either of those is allowed. We'll report
* the disallowed actions with the results. We never allow self-fencing
* for remapped "on" actions because the target is off at that point.
*/
if (localhost_is_eligible(device, PCMK_ACTION_OFF, target, allow_self)
|| localhost_is_eligible(device, PCMK_ACTION_ON, target, FALSE)) {
return true;
}
}
return false;
}
static void
can_fence_host_with_device(stonith_device_t *dev,
struct device_search_s *search)
{
gboolean can = FALSE;
const char *check_type = "Internal bug";
const char *target = NULL;
const char *alias = NULL;
const char *dev_id = "Unspecified device";
const char *action = (search == NULL)? NULL : search->action;
CRM_CHECK((dev != NULL) && (action != NULL), goto search_report_results);
if (dev->id != NULL) {
dev_id = dev->id;
}
target = search->host;
if (target == NULL) {
can = TRUE;
check_type = "No target";
goto search_report_results;
}
/* Answer immediately if the device does not support the action
* or the local node is not allowed to perform it
*/
if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)
&& !pcmk_is_set(dev->flags, st_device_supports_on)) {
check_type = "Agent does not support 'on'";
goto search_report_results;
} else if (!localhost_is_eligible_with_remap(dev, action, target,
search->allow_suicide)) {
check_type = "This node is not allowed to execute action";
goto search_report_results;
}
// Check eligibility as specified by pcmk_host_check
check_type = target_list_type(dev);
alias = g_hash_table_lookup(dev->aliases, target);
if (pcmk__str_eq(check_type, PCMK_VALUE_NONE, pcmk__str_casei)) {
can = TRUE;
} else if (pcmk__str_eq(check_type, PCMK_VALUE_STATIC_LIST,
pcmk__str_casei)) {
if (pcmk__str_in_list(target, dev->targets, pcmk__str_casei)) {
can = TRUE;
} else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)
&& g_hash_table_lookup(dev->aliases, target)) {
can = TRUE;
}
} else if (pcmk__str_eq(check_type, PCMK_VALUE_DYNAMIC_LIST,
pcmk__str_casei)) {
time_t now = time(NULL);
if (dev->targets == NULL || dev->targets_age + 60 < now) {
int device_timeout = get_action_timeout(dev, PCMK_ACTION_LIST,
search->per_device_timeout);
if (device_timeout > search->per_device_timeout) {
crm_notice("Since the pcmk_list_timeout (%ds) parameter of %s "
"is larger than " PCMK_OPT_STONITH_TIMEOUT
" (%ds), timeout may occur",
device_timeout, dev_id, search->per_device_timeout);
}
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev_id, target, action);
schedule_internal_command(__func__, dev, PCMK_ACTION_LIST, NULL,
search->per_device_timeout, search, dynamic_list_search_cb);
            /* We'll respond to this search request asynchronously in the
             * callback */
return;
}
if (pcmk__str_in_list(((alias == NULL)? target : alias), dev->targets,
pcmk__str_casei)) {
can = TRUE;
}
} else if (pcmk__str_eq(check_type, PCMK_VALUE_STATUS, pcmk__str_casei)) {
int device_timeout = get_action_timeout(dev, check_type, search->per_device_timeout);
if (device_timeout > search->per_device_timeout) {
crm_notice("Since the pcmk_status_timeout (%ds) parameter of %s is "
"larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), "
"timeout may occur",
device_timeout, dev_id, search->per_device_timeout);
}
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev_id, target, action);
schedule_internal_command(__func__, dev, PCMK_ACTION_STATUS, target,
search->per_device_timeout, search, status_search_cb);
        /* We'll respond to this search request asynchronously in the
         * callback */
return;
} else {
crm_err("Invalid value for " PCMK_STONITH_HOST_CHECK ": %s", check_type);
check_type = "Invalid " PCMK_STONITH_HOST_CHECK;
}
search_report_results:
crm_info("%s is%s eligible to fence (%s) %s%s%s%s: %s",
dev_id, (can? "" : " not"), pcmk__s(action, "unspecified action"),
pcmk__s(target, "unspecified target"),
(alias == NULL)? "" : " (as '", pcmk__s(alias, ""),
(alias == NULL)? "" : "')", check_type);
search_devices_record_result(search, ((dev == NULL)? NULL : dev_id), can);
}
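/*!
 * \internal
 * \brief Check whether a device may fence a search's target (GHFunc wrapper)
 *
 * \param[in]     key        Device ID (ignored)
 * \param[in,out] value      Fence device to check
 * \param[in,out] user_data  Device search being performed
 */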
static void
search_devices(gpointer key, gpointer value, gpointer user_data)
{
stonith_device_t *dev = value;
struct device_search_s *search = user_data;
can_fence_host_with_device(dev, search);
}
#define DEFAULT_QUERY_TIMEOUT 20
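/*!
 * \internal
 * \brief Ask all registered devices whether they can perform an action
 *
 * \param[in]     host                 Name of node to be fenced (if any)
 * \param[in]     action               Fence action to check for
 * \param[in]     timeout              Per-device timeout to use (seconds)
 * \param[in]     suicide              Whether self-fencing is allowed
 * \param[in,out] user_data            Data to pass to \p callback
 * \param[in]     callback             Function to call with the list of
 *                                     capable devices once all replies arrive
 * \param[in]     support_action_only  If not st_device_supports_none, restrict
 *                                     the search to devices with this flag set
 *
 * \note If no devices are registered, \p callback is invoked immediately with
 *       a NULL device list.
 */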
static void
get_capable_devices(const char *host, const char *action, int timeout, bool suicide, void *user_data,
void (*callback) (GList * devices, void *user_data), uint32_t support_action_only)
{
struct device_search_s *search;
guint ndevices = g_hash_table_size(device_list);
if (ndevices == 0) {
callback(NULL, user_data);
return;
}
search = pcmk__assert_alloc(1, sizeof(struct device_search_s));
search->host = pcmk__str_copy(host);
search->action = pcmk__str_copy(action);
search->per_device_timeout = timeout;
search->allow_suicide = suicide;
search->callback = callback;
search->user_data = user_data;
search->support_action_only = support_action_only;
/* We are guaranteed this many replies, even if a device is
* unregistered while the search is in progress.
*/
search->replies_needed = ndevices;
crm_debug("Searching %d device%s to see which can execute '%s' targeting %s",
ndevices, pcmk__plural_s(ndevices),
(search->action? search->action : "unknown action"),
(search->host? search->host : "any node"));
g_hash_table_foreach(device_list, search_devices, search);
}
struct st_query_data {
xmlNode *reply;
char *remote_peer;
char *client_id;
char *target;
char *action;
int call_options;
};
/*!
* \internal
* \brief Add action-specific attributes to query reply XML
*
* \param[in,out] xml XML to add attributes to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
*/
static void
add_action_specific_attributes(xmlNode *xml, const char *action,
const stonith_device_t *device,
const char *target)
{
int action_specific_timeout;
int delay_max;
int delay_base;
CRM_CHECK(xml && action && device, return);
// PCMK__XA_ST_REQUIRED is currently used only for unfencing
if (is_action_required(action, device)) {
crm_trace("Action '%s' is required using %s", action, device->id);
crm_xml_add_int(xml, PCMK__XA_ST_REQUIRED, 1);
}
// pcmk_<action>_timeout if configured
action_specific_timeout = get_action_timeout(device, action, 0);
if (action_specific_timeout) {
crm_trace("Action '%s' has timeout %ds using %s",
action, action_specific_timeout, device->id);
crm_xml_add_int(xml, PCMK__XA_ST_ACTION_TIMEOUT,
action_specific_timeout);
}
delay_max = get_action_delay_max(device, action);
if (delay_max > 0) {
crm_trace("Action '%s' has maximum random delay %ds using %s",
action, delay_max, device->id);
crm_xml_add_int(xml, PCMK__XA_ST_DELAY_MAX, delay_max);
}
delay_base = get_action_delay_base(device, action, target);
if (delay_base > 0) {
crm_xml_add_int(xml, PCMK__XA_ST_DELAY_BASE, delay_base);
}
if ((delay_max > 0) && (delay_base == 0)) {
crm_trace("Action '%s' has maximum random delay %ds using %s",
action, delay_max, device->id);
} else if ((delay_max == 0) && (delay_base > 0)) {
crm_trace("Action '%s' has a static delay of %ds using %s",
action, delay_base, device->id);
} else if ((delay_max > 0) && (delay_base > 0)) {
crm_trace("Action '%s' has a minimum delay of %ds and a randomly chosen "
"maximum delay of %ds using %s",
action, delay_base, delay_max, device->id);
}
}
/*!
* \internal
* \brief Add "disallowed" attribute to query reply XML if appropriate
*
* \param[in,out] xml XML to add attribute to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
* \param[in] allow_suicide Whether self-fencing is allowed
*/
static void
add_disallowed(xmlNode *xml, const char *action, const stonith_device_t *device,
const char *target, gboolean allow_suicide)
{
if (!localhost_is_eligible(device, action, target, allow_suicide)) {
crm_trace("Action '%s' using %s is disallowed for local host",
action, device->id);
pcmk__xe_set_bool_attr(xml, PCMK__XA_ST_ACTION_DISALLOWED, true);
}
}
/*!
* \internal
* \brief Add child element with action-specific values to query reply XML
*
* \param[in,out] xml XML to add attribute to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
* \param[in] allow_suicide Whether self-fencing is allowed
*/
static void
add_action_reply(xmlNode *xml, const char *action,
const stonith_device_t *device, const char *target,
gboolean allow_suicide)
{
xmlNode *child = pcmk__xe_create(xml, PCMK__XE_ST_DEVICE_ACTION);
crm_xml_add(child, PCMK_XA_ID, action);
add_action_specific_attributes(child, action, device, target);
add_disallowed(child, action, device, target, allow_suicide);
}
/*!
* \internal
* \brief Send a reply to a CPG peer or IPC client
*
* \param[in] reply XML reply to send
* \param[in] call_options Send synchronously if st_opt_sync_call is set
* \param[in] remote_peer If not NULL, name of peer node to send CPG reply
* \param[in,out] client If not NULL, client to send IPC reply
*/
static void
stonith_send_reply(const xmlNode *reply, int call_options,
const char *remote_peer, pcmk__client_t *client)
{
CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)),
return);
if (remote_peer == NULL) {
do_local_reply(reply, client, call_options);
} else {
const crm_node_t *node =
pcmk__get_node(0, remote_peer, NULL,
pcmk__node_search_cluster_member);
pcmk__cluster_send_message(node, crm_msg_stonith_ng, reply);
}
}
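/*!
 * \internal
 * \brief Pack the results of a device search into XML and send the query reply
 *
 * \param[in,out] devices    List of IDs of devices capable of fencing target
 * \param[in,out] user_data  Query data (struct st_query_data)
 *
 * \note This frees both \p devices and \p user_data.
 */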
static void
stonith_query_capable_device_cb(GList * devices, void *user_data)
{
struct st_query_data *query = user_data;
int available_devices = 0;
xmlNode *wrapper = NULL;
xmlNode *list = NULL;
GList *lpc = NULL;
pcmk__client_t *client = NULL;
if (query->client_id != NULL) {
client = pcmk__find_client_by_id(query->client_id);
if ((client == NULL) && (query->remote_peer == NULL)) {
crm_trace("Skipping reply to %s: no longer a client",
query->client_id);
goto done;
}
}
// Pack the results into XML
wrapper = pcmk__xe_create(query->reply, PCMK__XE_ST_CALLDATA);
list = pcmk__xe_create(wrapper, __func__);
crm_xml_add(list, PCMK__XA_ST_TARGET, query->target);
for (lpc = devices; lpc != NULL; lpc = lpc->next) {
stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
const char *action = query->action;
xmlNode *dev = NULL;
if (!device) {
/* It is possible the device got unregistered while
* determining who can fence the target */
continue;
}
available_devices++;
dev = pcmk__xe_create(list, PCMK__XE_ST_DEVICE_ID);
crm_xml_add(dev, PCMK_XA_ID, device->id);
crm_xml_add(dev, PCMK__XA_NAMESPACE, device->namespace);
crm_xml_add(dev, PCMK_XA_AGENT, device->agent);
// Has had successful monitor, list, or status on this node
crm_xml_add_int(dev, PCMK__XA_ST_MONITOR_VERIFIED, device->verified);
crm_xml_add_int(dev, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, device->flags);
/* If the originating fencer wants to reboot the node, and we have a
* capable device that doesn't support "reboot", remap to "off" instead.
*/
if (!pcmk_is_set(device->flags, st_device_supports_reboot)
&& pcmk__str_eq(query->action, PCMK_ACTION_REBOOT,
pcmk__str_none)) {
crm_trace("%s doesn't support reboot, using values for off instead",
device->id);
action = PCMK_ACTION_OFF;
}
/* Add action-specific values if available */
add_action_specific_attributes(dev, action, device, query->target);
if (pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* A "reboot" *might* get remapped to "off" then "on", so after
* sending the "reboot"-specific values in the main element, we add
* sub-elements for "off" and "on" values.
*
* We short-circuited earlier if "reboot", "off" and "on" are all
             * disallowed for the local host. However, if only one or two are
* disallowed, we send back the results and mark which ones are
* disallowed. If "reboot" is disallowed, this might cause problems
* with older fencer versions, which won't check for it. Older
* versions will ignore "off" and "on", so they are not a problem.
*/
add_disallowed(dev, action, device, query->target,
pcmk_is_set(query->call_options, st_opt_allow_suicide));
add_action_reply(dev, PCMK_ACTION_OFF, device, query->target,
pcmk_is_set(query->call_options, st_opt_allow_suicide));
add_action_reply(dev, PCMK_ACTION_ON, device, query->target, FALSE);
}
/* A query without a target wants device parameters */
if (query->target == NULL) {
xmlNode *attrs = pcmk__xe_create(dev, PCMK__XE_ATTRIBUTES);
g_hash_table_foreach(device->params, hash2field, attrs);
}
}
crm_xml_add_int(list, PCMK__XA_ST_AVAILABLE_DEVICES, available_devices);
if (query->target) {
crm_debug("Found %d matching device%s for target '%s'",
available_devices, pcmk__plural_s(available_devices),
query->target);
} else {
crm_debug("%d device%s installed",
available_devices, pcmk__plural_s(available_devices));
}
crm_log_xml_trace(list, "query-result");
stonith_send_reply(query->reply, query->call_options, query->remote_peer,
client);
done:
free_xml(query->reply);
free(query->remote_peer);
free(query->client_id);
free(query->target);
free(query->action);
free(query);
g_list_free_full(devices, free);
}
/*!
* \internal
* \brief Log the result of an asynchronous command
*
* \param[in] cmd Command the result is for
* \param[in] result Result of command
* \param[in] pid Process ID of command, if available
* \param[in] next Alternate device that will be tried if command failed
* \param[in] op_merged Whether this command was merged with an earlier one
*/
static void
log_async_result(const async_command_t *cmd,
const pcmk__action_result_t *result,
int pid, const char *next, bool op_merged)
{
int log_level = LOG_ERR;
int output_log_level = LOG_NEVER;
guint devices_remaining = g_list_length(cmd->next_device_iter);
GString *msg = g_string_sized_new(80); // Reasonable starting size
// Choose log levels appropriately if we have a result
if (pcmk__result_ok(result)) {
log_level = (cmd->target == NULL)? LOG_DEBUG : LOG_NOTICE;
if ((result->action_stdout != NULL)
&& !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA,
pcmk__str_none)) {
output_log_level = LOG_DEBUG;
}
next = NULL;
} else {
log_level = (cmd->target == NULL)? LOG_NOTICE : LOG_ERR;
if ((result->action_stdout != NULL)
&& !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA,
pcmk__str_none)) {
output_log_level = LOG_WARNING;
}
}
// Build the log message piece by piece
pcmk__g_strcat(msg, "Operation '", cmd->action, "' ", NULL);
if (pid != 0) {
g_string_append_printf(msg, "[%d] ", pid);
}
if (cmd->target != NULL) {
pcmk__g_strcat(msg, "targeting ", cmd->target, " ", NULL);
}
if (cmd->device != NULL) {
pcmk__g_strcat(msg, "using ", cmd->device, " ", NULL);
}
// Add exit status or execution status as appropriate
if (result->execution_status == PCMK_EXEC_DONE) {
g_string_append_printf(msg, "returned %d", result->exit_status);
} else {
pcmk__g_strcat(msg, "could not be executed: ",
pcmk_exec_status_str(result->execution_status), NULL);
}
// Add exit reason and next device if appropriate
if (result->exit_reason != NULL) {
pcmk__g_strcat(msg, " (", result->exit_reason, ")", NULL);
}
if (next != NULL) {
pcmk__g_strcat(msg, ", retrying with ", next, NULL);
}
if (devices_remaining > 0) {
g_string_append_printf(msg, " (%u device%s remaining)",
(unsigned int) devices_remaining,
pcmk__plural_s(devices_remaining));
}
g_string_append_printf(msg, " " CRM_XS " %scall %d from %s",
(op_merged? "merged " : ""), cmd->id,
cmd->client_name);
// Log the result
do_crm_log(log_level, "%s", msg->str);
g_string_free(msg, TRUE);
// Log the output (which may have multiple lines), if appropriate
if (output_log_level != LOG_NEVER) {
char *prefix = crm_strdup_printf("%s[%d]", cmd->device, pid);
crm_log_output(output_log_level, prefix, result->action_stdout);
free(prefix);
}
}
/*!
* \internal
* \brief Reply to requester after asynchronous command completion
*
* \param[in] cmd Command that completed
* \param[in] result Result of command
* \param[in] pid Process ID of command, if available
* \param[in] merged If true, command was merged with another, not executed
*/
static void
send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result,
int pid, bool merged)
{
xmlNode *reply = NULL;
pcmk__client_t *client = NULL;
CRM_CHECK((cmd != NULL) && (result != NULL), return);
log_async_result(cmd, result, pid, NULL, merged);
if (cmd->client != NULL) {
client = pcmk__find_client_by_id(cmd->client);
if ((client == NULL) && (cmd->origin == NULL)) {
crm_trace("Skipping reply to %s: no longer a client", cmd->client);
return;
}
}
reply = construct_async_reply(cmd, result);
if (merged) {
pcmk__xe_set_bool_attr(reply, PCMK__XA_ST_OP_MERGED, true);
}
if (!stand_alone && pcmk__is_fencing_action(cmd->action)
&& pcmk__str_eq(cmd->origin, cmd->target, pcmk__str_casei)) {
/* The target was also the originator, so broadcast the result on its
* behalf (since it will be unable to).
*/
crm_trace("Broadcast '%s' result for %s (target was also originator)",
cmd->action, cmd->target);
crm_xml_add(reply, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST);
crm_xml_add(reply, PCMK__XA_ST_OP, STONITH_OP_NOTIFY);
pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, reply);
} else {
// Reply only to the originator
stonith_send_reply(reply, cmd->options, cmd->origin, client);
}
crm_log_xml_trace(reply, "Reply");
free_xml(reply);
if (stand_alone) {
/* Do notification with a clean data object */
xmlNode *notify_data = pcmk__xe_create(NULL, PCMK__XE_ST_NOTIFY_FENCE);
stonith__xe_set_result(notify_data, result);
crm_xml_add(notify_data, PCMK__XA_ST_TARGET, cmd->target);
crm_xml_add(notify_data, PCMK__XA_ST_OP, cmd->op);
crm_xml_add(notify_data, PCMK__XA_ST_DELEGATE, "localhost");
crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ID, cmd->device);
crm_xml_add(notify_data, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id);
crm_xml_add(notify_data, PCMK__XA_ST_ORIGIN, cmd->client);
fenced_send_notification(PCMK__VALUE_ST_NOTIFY_FENCE, result,
notify_data);
fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL);
}
}
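/*!
 * \internal
 * \brief Remove a command from its device's list of pending operations
 *
 * \param[in,out] cmd  Fencing command to cancel
 */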
static void
cancel_stonith_command(async_command_t * cmd)
{
stonith_device_t *device = cmd_device(cmd);
if (device) {
crm_trace("Cancel scheduled '%s' action using %s",
cmd->action, device->id);
device->pending_ops = g_list_remove(device->pending_ops, cmd);
}
}
/*!
* \internal
* \brief Cancel and reply to any duplicates of a just-completed operation
*
* Check whether any fencing operations are scheduled to do the same thing as
* one that just succeeded. If so, rather than performing the same operation
* twice, return the result of this operation for all matching pending commands.
*
* \param[in,out] cmd Fencing operation that just succeeded
* \param[in] result Result of \p cmd
* \param[in] pid If nonzero, process ID of agent invocation (for logs)
*
* \note Duplicate merging will do the right thing for either type of remapped
* reboot. If the executing fencer remapped an unsupported reboot to off,
* then cmd->action will be "reboot" and will be merged with any other
* reboot requests. If the originating fencer remapped a topology reboot
* to off then on, we will get here once with cmd->action "off" and once
* with "on", and they will be merged separately with similar requests.
*/
static void
reply_to_duplicates(async_command_t *cmd, const pcmk__action_result_t *result,
int pid)
{
GList *next = NULL;
for (GList *iter = cmd_list; iter != NULL; iter = next) {
async_command_t *cmd_other = iter->data;
next = iter->next; // We might delete this entry, so grab next now
if (cmd == cmd_other) {
continue;
}
/* A pending operation matches if:
* 1. The client connections are different.
* 2. The target is the same.
* 3. The fencing action is the same.
* 4. The device scheduled to execute the action is the same.
*/
if (pcmk__str_eq(cmd->client, cmd_other->client, pcmk__str_casei) ||
!pcmk__str_eq(cmd->target, cmd_other->target, pcmk__str_casei) ||
!pcmk__str_eq(cmd->action, cmd_other->action, pcmk__str_none) ||
!pcmk__str_eq(cmd->device, cmd_other->device, pcmk__str_casei)) {
continue;
}
crm_notice("Merging fencing action '%s'%s%s originating from "
"client %s with identical fencing request from client %s",
cmd_other->action,
(cmd_other->target == NULL)? "" : " targeting ",
pcmk__s(cmd_other->target, ""), cmd_other->client_name,
cmd->client_name);
// Stop tracking the duplicate, send its result, and cancel it
cmd_list = g_list_remove_link(cmd_list, iter);
send_async_reply(cmd_other, result, pid, true);
cancel_stonith_command(cmd_other);
free_async_command(cmd_other);
g_list_free_1(iter);
}
}
/*!
* \internal
* \brief Return the next required device (if any) for an operation
*
* \param[in,out] cmd Fencing operation that just succeeded
*
* \return Next device required for action if any, otherwise NULL
*/
static stonith_device_t *
next_required_device(async_command_t *cmd)
{
for (GList *iter = cmd->next_device_iter; iter != NULL; iter = iter->next) {
stonith_device_t *next_device = g_hash_table_lookup(device_list,
iter->data);
if (is_action_required(cmd->action, next_device)) {
/* This is only called for successful actions, so it's OK to skip
* non-required devices.
*/
cmd->next_device_iter = iter->next;
return next_device;
}
}
return NULL;
}
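/*!
 * \internal
 * \brief Handle the completion of an asynchronous fence device action
 *
 * Mark the device as available for more work, then either reply to the
 * requester (and to any duplicate requests, if the action succeeded) or, if
 * the operation requires more fencing, schedule the command on the next
 * device.
 *
 * \param[in]     pid        Process ID of action, if known
 * \param[in]     result     Result of action
 * \param[in,out] user_data  Fencing command that completed (async_command_t)
 */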
static void
st_child_done(int pid, const pcmk__action_result_t *result, void *user_data)
{
async_command_t *cmd = user_data;
stonith_device_t *device = NULL;
stonith_device_t *next_device = NULL;
CRM_CHECK(cmd != NULL, return);
device = cmd_device(cmd);
cmd->active_on = NULL;
/* The device is ready to do something else now */
if (device) {
if (!device->verified && pcmk__result_ok(result)
&& pcmk__strcase_any_of(cmd->action, PCMK_ACTION_LIST,
PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS,
NULL)) {
device->verified = TRUE;
}
mainloop_set_trigger(device->work);
}
if (pcmk__result_ok(result)) {
next_device = next_required_device(cmd);
} else if ((cmd->next_device_iter != NULL)
&& !is_action_required(cmd->action, device)) {
        /* If this device didn't work out, see whether there are any others we
         * can try. If the failed device was 'required', we can't pick another
         * device. */
next_device = g_hash_table_lookup(device_list,
cmd->next_device_iter->data);
cmd->next_device_iter = cmd->next_device_iter->next;
}
if (next_device == NULL) {
send_async_reply(cmd, result, pid, false);
if (pcmk__result_ok(result)) {
reply_to_duplicates(cmd, result, pid);
}
free_async_command(cmd);
} else { // This operation requires more fencing
log_async_result(cmd, result, pid, next_device->id, false);
schedule_stonith_command(cmd, next_device);
}
}
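// GCompareFunc to sort fence devices by descending priority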
static gint
sort_device_priority(gconstpointer a, gconstpointer b)
{
const stonith_device_t *dev_a = a;
const stonith_device_t *dev_b = b;
if (dev_a->priority > dev_b->priority) {
return -1;
} else if (dev_a->priority < dev_b->priority) {
return 1;
}
return 0;
}
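/*!
 * \internal
 * \brief Schedule a fencing command on the highest-priority capable device
 *
 * \param[in,out] devices    List of IDs of devices capable of fencing target
 * \param[in,out] user_data  Fencing command to schedule (async_command_t)
 *
 * \note If no device is capable, a "no fence device" result is sent instead.
 */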
static void
stonith_fence_get_devices_cb(GList * devices, void *user_data)
{
async_command_t *cmd = user_data;
stonith_device_t *device = NULL;
guint ndevices = g_list_length(devices);
crm_info("Found %d matching device%s for target '%s'",
ndevices, pcmk__plural_s(ndevices), cmd->target);
if (devices != NULL) {
/* Order based on priority */
devices = g_list_sort(devices, sort_device_priority);
device = g_hash_table_lookup(device_list, devices->data);
}
if (device == NULL) { // No device found
pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
pcmk__format_result(&result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"No device configured for target '%s'",
cmd->target);
send_async_reply(cmd, &result, 0, false);
pcmk__reset_result(&result);
free_async_command(cmd);
g_list_free_full(devices, free);
} else { // Device found, schedule it for fencing
cmd->device_list = devices;
cmd->next_device_iter = devices->next;
schedule_stonith_command(cmd, device);
}
}
/*!
* \internal
* \brief Execute a fence action via the local node
*
* \param[in] msg Fencing request
* \param[out] result Where to store result of fence action
*/
static void
fence_locally(xmlNode *msg, pcmk__action_result_t *result)
{
const char *device_id = NULL;
stonith_device_t *device = NULL;
async_command_t *cmd = NULL;
xmlNode *dev = NULL;
CRM_CHECK((msg != NULL) && (result != NULL), return);
dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_ERR);
cmd = create_async_command(msg);
if (cmd == NULL) {
crm_log_xml_warn(msg, "invalid");
fenced_set_protocol_error(result);
return;
}
device_id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
if (device_id != NULL) {
device = g_hash_table_lookup(device_list, device_id);
if (device == NULL) {
crm_err("Requested device '%s' is not available", device_id);
pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"Requested device '%s' not found", device_id);
return;
}
schedule_stonith_command(cmd, device);
} else {
const char *host = crm_element_value(dev, PCMK__XA_ST_TARGET);
if (pcmk_is_set(cmd->options, st_opt_cs_nodeid)) {
int nodeid = 0;
crm_node_t *node = NULL;
pcmk__scan_min_int(host, &nodeid, 0);
node = pcmk__search_node_caches(nodeid, NULL,
pcmk__node_search_any
|pcmk__node_search_cluster_cib);
if (node != NULL) {
host = node->uname;
}
}
/* If we get to here, then self-fencing is implicitly allowed */
get_capable_devices(host, cmd->action, cmd->default_timeout,
TRUE, cmd, stonith_fence_get_devices_cb,
fenced_support_flag(cmd->action));
}
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL);
}
/*!
* \internal
* \brief Build an XML reply for a fencing operation
*
* \param[in] request Request that reply is for
* \param[in] data If not NULL, add to reply as call data
* \param[in] result Full result of fencing operation
*
* \return Newly created XML reply
* \note The caller is responsible for freeing the result.
* \note This has some overlap with construct_async_reply(), but that copies
* values from an async_command_t, whereas this one copies them from the
* request.
*/
xmlNode *
fenced_construct_reply(const xmlNode *request, xmlNode *data,
const pcmk__action_result_t *result)
{
xmlNode *reply = NULL;
reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);
crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__);
crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG);
stonith__xe_set_result(reply, result);
if (request == NULL) {
/* Most likely, this is the result of a stonith operation that was
* initiated before we came up. Unfortunately that means we lack enough
* information to provide clients with a full result.
*
* @TODO Maybe synchronize this information at start-up?
*/
crm_warn("Missing request information for client notifications for "
"operation with result '%s' (initiated before we came up?)",
pcmk_exec_status_str(result->execution_status));
} else {
const char *name = NULL;
const char *value = NULL;
// Attributes to copy from request to reply
const char *names[] = {
PCMK__XA_ST_OP,
PCMK__XA_ST_CALLID,
PCMK__XA_ST_CLIENTID,
PCMK__XA_ST_CLIENTNAME,
PCMK__XA_ST_REMOTE_OP,
PCMK__XA_ST_CALLOPT,
};
for (int lpc = 0; lpc < PCMK__NELEM(names); lpc++) {
name = names[lpc];
value = crm_element_value(request, name);
crm_xml_add(reply, name, value);
}
if (data != NULL) {
xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_ST_CALLDATA);
pcmk__xml_copy(wrapper, data);
}
}
return reply;
}
/*!
* \internal
* \brief Build an XML reply to an asynchronous fencing command
*
* \param[in] cmd Fencing command that reply is for
* \param[in] result Command result
 *
 * \return Newly created XML reply
 * \note The caller is responsible for freeing the result.
 */
static xmlNode *
construct_async_reply(const async_command_t *cmd,
const pcmk__action_result_t *result)
{
xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);
crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__);
crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG);
crm_xml_add(reply, PCMK__XA_ST_OP, cmd->op);
crm_xml_add(reply, PCMK__XA_ST_DEVICE_ID, cmd->device);
crm_xml_add(reply, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id);
crm_xml_add(reply, PCMK__XA_ST_CLIENTID, cmd->client);
crm_xml_add(reply, PCMK__XA_ST_CLIENTNAME, cmd->client_name);
crm_xml_add(reply, PCMK__XA_ST_TARGET, cmd->target);
crm_xml_add(reply, PCMK__XA_ST_DEVICE_ACTION, cmd->op);
crm_xml_add(reply, PCMK__XA_ST_ORIGIN, cmd->origin);
crm_xml_add_int(reply, PCMK__XA_ST_CALLID, cmd->id);
crm_xml_add_int(reply, PCMK__XA_ST_CALLOPT, cmd->options);
stonith__xe_set_result(reply, result);
return reply;
}
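/*!
 * \internal
 * \brief Check whether a cluster peer is eligible to participate in fencing
 *
 * \param[in] peer  Node to check
 *
 * \return TRUE if \p peer is non-NULL, has a node name, and its cluster
 *         process is active, otherwise FALSE
 */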
bool fencing_peer_active(crm_node_t *peer)
{
if (peer == NULL) {
return FALSE;
} else if (peer->uname == NULL) {
return FALSE;
} else if (pcmk_is_set(peer->processes, crm_get_cluster_proc())) {
return TRUE;
}
return FALSE;
}
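/*!
 * \internal
 * \brief Record the current time as a fencing operation's completion time
 *
 * \param[in,out] op  Fencing operation to update
 */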
void
set_fencing_completed(remote_fencing_op_t *op)
{
struct timespec tv;
qb_util_timespec_from_epoch_get(&tv);
op->completed = tv.tv_sec;
op->completed_nsec = tv.tv_nsec;
}
/*!
* \internal
* \brief Look for alternate node needed if local node shouldn't fence target
*
* \param[in] target Node that must be fenced
*
* \return Name of an alternate node that should fence \p target if any,
* or NULL otherwise
*/
static const char *
check_alternate_host(const char *target)
{
if (pcmk__str_eq(target, stonith_our_uname, pcmk__str_casei)) {
GHashTableIter gIter;
crm_node_t *entry = NULL;
g_hash_table_iter_init(&gIter, crm_peer_cache);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
if (fencing_peer_active(entry)
&& !pcmk__str_eq(entry->uname, target, pcmk__str_casei)) {
crm_notice("Forwarding self-fencing request to %s",
entry->uname);
return entry->uname;
}
}
crm_warn("Will handle own fencing because no peer can");
}
return NULL;
}
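/*!
 * \internal
 * \brief Remove a relay operation that has been replaced by another operation
 *
 * If a fencing request targeting the local node carries a relay operation ID,
 * remove that relay operation (and any registration of it as a duplicate)
 * from the table of remote operations.
 *
 * \param[in] request  Fencing request to check
 */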
static void
remove_relay_op(xmlNode * request)
{
xmlNode *dev = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, request,
LOG_TRACE);
const char *relay_op_id = NULL;
const char *op_id = NULL;
const char *client_name = NULL;
const char *target = NULL;
remote_fencing_op_t *relay_op = NULL;
if (dev) {
target = crm_element_value(dev, PCMK__XA_ST_TARGET);
}
relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP_RELAY);
op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP);
client_name = crm_element_value(request, PCMK__XA_ST_CLIENTNAME);
/* Delete RELAY operation. */
if (relay_op_id && target && pcmk__str_eq(target, stonith_our_uname, pcmk__str_casei)) {
relay_op = g_hash_table_lookup(stonith_remote_op_list, relay_op_id);
if (relay_op) {
GHashTableIter iter;
remote_fencing_op_t *list_op = NULL;
g_hash_table_iter_init(&iter, stonith_remote_op_list);
/* If the operation to be deleted is registered as a duplicate, delete the registration. */
while (g_hash_table_iter_next(&iter, NULL, (void **)&list_op)) {
GList *dup_iter = NULL;
if (list_op != relay_op) {
for (dup_iter = list_op->duplicates; dup_iter != NULL; dup_iter = dup_iter->next) {
remote_fencing_op_t *other = dup_iter->data;
if (other == relay_op) {
other->duplicates = g_list_remove(other->duplicates, relay_op);
break;
}
}
}
}
crm_debug("Deleting relay op %s ('%s'%s%s for %s), "
"replaced by op %s ('%s'%s%s for %s)",
relay_op->id, relay_op->action,
(relay_op->target == NULL)? "" : " targeting ",
pcmk__s(relay_op->target, ""),
relay_op->client_name, op_id, relay_op->action,
(target == NULL)? "" : " targeting ", pcmk__s(target, ""),
client_name);
g_hash_table_remove(stonith_remote_op_list, relay_op_id);
}
}
}
/*!
* \internal
* \brief Check whether an API request was sent by a privileged user
*
* API commands related to fencing configuration may be done only by privileged
* IPC users (i.e. root or hacluster), because all other users should go through
* the CIB to have ACLs applied. If no client was given, this is a peer request,
* which is always allowed.
*
* \param[in] c IPC client that sent request (or NULL if sent by CPG peer)
* \param[in] op Requested API operation (for logging only)
*
* \return true if sender is peer or privileged client, otherwise false
*/
static inline bool
is_privileged(const pcmk__client_t *c, const char *op)
{
if ((c == NULL) || pcmk_is_set(c->flags, pcmk__client_privileged)) {
return true;
} else {
crm_warn("Rejecting IPC request '%s' from unprivileged client %s",
pcmk__s(op, ""), pcmk__client_name(c));
return false;
}
}
// CRM_OP_REGISTER
static xmlNode *
handle_register_request(pcmk__request_t *request)
{
xmlNode *reply = pcmk__xe_create(NULL, "reply");
CRM_ASSERT(request->ipc_client != NULL);
crm_xml_add(reply, PCMK__XA_ST_OP, CRM_OP_REGISTER);
crm_xml_add(reply, PCMK__XA_ST_CLIENTID, request->ipc_client->id);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
pcmk__set_request_flags(request, pcmk__request_reuse_options);
return reply;
}
// STONITH_OP_EXEC
static xmlNode *
handle_agent_request(pcmk__request_t *request)
{
execute_agent_action(request->xml, &request->result);
if (request->result.execution_status == PCMK_EXEC_PENDING) {
return NULL;
}
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_TIMEOUT_UPDATE
static xmlNode *
handle_update_timeout_request(pcmk__request_t *request)
{
const char *call_id = crm_element_value(request->xml, PCMK__XA_ST_CALLID);
const char *client_id = crm_element_value(request->xml,
PCMK__XA_ST_CLIENTID);
int op_timeout = 0;
crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &op_timeout);
do_stonith_async_timeout_update(client_id, call_id, op_timeout);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return NULL;
}
// STONITH_OP_QUERY
static xmlNode *
handle_query_request(pcmk__request_t *request)
{
int timeout = 0;
xmlNode *dev = NULL;
const char *action = NULL;
const char *target = NULL;
const char *client_id = crm_element_value(request->xml,
PCMK__XA_ST_CLIENTID);
struct st_query_data *query = NULL;
if (request->peer != NULL) {
// Record it for the future notification
create_remote_stonith_op(client_id, request->xml, TRUE);
}
/* Delete the DC node RELAY operation. */
remove_relay_op(request->xml);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
dev = get_xpath_object("//@" PCMK__XE_ST_DEVICE_ACTION, request->xml,
LOG_NEVER);
if (dev != NULL) {
const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
if (pcmk__str_eq(device, "manual_ack", pcmk__str_casei)) {
return NULL; // No query or reply necessary
}
target = crm_element_value(dev, PCMK__XA_ST_TARGET);
action = crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION);
}
crm_log_xml_trace(request->xml, "Query");
query = pcmk__assert_alloc(1, sizeof(struct st_query_data));
query->reply = fenced_construct_reply(request->xml, NULL, &request->result);
query->remote_peer = pcmk__str_copy(request->peer);
query->client_id = pcmk__str_copy(client_id);
query->target = pcmk__str_copy(target);
query->action = pcmk__str_copy(action);
query->call_options = request->call_options;
crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &timeout);
get_capable_devices(target, action, timeout,
pcmk_is_set(query->call_options, st_opt_allow_suicide),
query, stonith_query_capable_device_cb, st_device_supports_none);
return NULL;
}
// STONITH_OP_NOTIFY
static xmlNode *
handle_notify_request(pcmk__request_t *request)
{
const char *flag_name = NULL;
CRM_ASSERT(request->ipc_client != NULL);
flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_ACTIVATE);
if (flag_name != NULL) {
crm_debug("Enabling %s callbacks for client %s",
flag_name, pcmk__request_origin(request));
pcmk__set_client_flags(request->ipc_client, get_stonith_flag(flag_name));
}
flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_DEACTIVATE);
if (flag_name != NULL) {
crm_debug("Disabling %s callbacks for client %s",
flag_name, pcmk__request_origin(request));
pcmk__clear_client_flags(request->ipc_client,
get_stonith_flag(flag_name));
}
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
pcmk__set_request_flags(request, pcmk__request_reuse_options);
return pcmk__ipc_create_ack(request->ipc_flags, PCMK__XE_ACK, NULL,
CRM_EX_OK);
}
// STONITH_OP_RELAY
static xmlNode *
handle_relay_request(pcmk__request_t *request)
{
xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request->xml,
LOG_TRACE);
crm_notice("Received forwarded fencing request from "
"%s %s to fence (%s) peer %s",
pcmk__request_origin_type(request),
pcmk__request_origin(request),
crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION),
crm_element_value(dev, PCMK__XA_ST_TARGET));
if (initiate_remote_stonith_op(NULL, request->xml, FALSE) == NULL) {
fenced_set_protocol_error(&request->result);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL);
return NULL;
}
// STONITH_OP_FENCE
static xmlNode *
handle_fence_request(pcmk__request_t *request)
{
if ((request->peer != NULL) || stand_alone) {
fence_locally(request->xml, &request->result);
} else if (pcmk_is_set(request->call_options, st_opt_manual_ack)) {
switch (fenced_handle_manual_confirmation(request->ipc_client,
request->xml)) {
case pcmk_rc_ok:
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE,
NULL);
break;
case EINPROGRESS:
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING,
NULL);
break;
default:
fenced_set_protocol_error(&request->result);
break;
}
} else {
const char *alternate_host = NULL;
xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request->xml,
LOG_TRACE);
const char *target = crm_element_value(dev, PCMK__XA_ST_TARGET);
const char *action = crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION);
const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
if (request->ipc_client != NULL) {
int tolerance = 0;
crm_notice("Client %s wants to fence (%s) %s using %s",
pcmk__request_origin(request), action,
target, (device? device : "any device"));
crm_element_value_int(dev, PCMK__XA_ST_TOLERANCE, &tolerance);
if (stonith_check_fence_tolerance(tolerance, target, action)) {
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE,
NULL);
return fenced_construct_reply(request->xml, NULL,
&request->result);
}
alternate_host = check_alternate_host(target);
} else {
crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'",
request->peer, action, target,
(device == NULL)? "(any)" : device);
}
if (alternate_host != NULL) {
const char *client_id = NULL;
remote_fencing_op_t *op = NULL;
crm_node_t *node = pcmk__get_node(0, alternate_host, NULL,
pcmk__node_search_cluster_member);
if (request->ipc_client->id == 0) {
client_id = crm_element_value(request->xml,
PCMK__XA_ST_CLIENTID);
} else {
client_id = request->ipc_client->id;
}
/* Create a duplicate fencing operation to relay with the client ID.
* When a query response is received, this operation should be
* deleted to avoid keeping the duplicate around.
*/
op = create_remote_stonith_op(client_id, request->xml, FALSE);
crm_xml_add(request->xml, PCMK__XA_ST_OP, STONITH_OP_RELAY);
crm_xml_add(request->xml, PCMK__XA_ST_CLIENTID,
request->ipc_client->id);
crm_xml_add(request->xml, PCMK__XA_ST_REMOTE_OP, op->id);
pcmk__cluster_send_message(node, crm_msg_stonith_ng, request->xml);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING,
NULL);
} else if (initiate_remote_stonith_op(request->ipc_client, request->xml,
FALSE) == NULL) {
fenced_set_protocol_error(&request->result);
} else {
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING,
NULL);
}
}
if (request->result.execution_status == PCMK_EXEC_PENDING) {
return NULL;
}
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_FENCE_HISTORY
static xmlNode *
handle_history_request(pcmk__request_t *request)
{
xmlNode *reply = NULL;
xmlNode *data = NULL;
stonith_fence_history(request->xml, &data, request->peer,
request->call_options);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
if (!pcmk_is_set(request->call_options, st_opt_discard_reply)) {
/* When the local node broadcasts its history, it sets
* st_opt_discard_reply and doesn't need a reply.
*/
reply = fenced_construct_reply(request->xml, data, &request->result);
}
free_xml(data);
return reply;
}
// STONITH_OP_DEVICE_ADD
static xmlNode *
handle_device_add_request(pcmk__request_t *request)
{
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, request->xml,
LOG_ERR);
if (is_privileged(request->ipc_client, op)) {
int rc = stonith_device_register(dev, FALSE);
pcmk__set_result(&request->result,
((rc == pcmk_ok)? CRM_EX_OK : CRM_EX_ERROR),
stonith__legacy2status(rc),
((rc == pcmk_ok)? NULL : pcmk_strerror(rc)));
} else {
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must register device via CIB");
}
fenced_send_config_notification(op, &request->result,
(dev == NULL)? NULL : pcmk__xe_id(dev));
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_DEVICE_DEL
static xmlNode *
handle_device_delete_request(pcmk__request_t *request)
{
xmlNode *dev = get_xpath_object("//" PCMK__XE_ST_DEVICE_ID, request->xml,
LOG_ERR);
const char *device_id = crm_element_value(dev, PCMK_XA_ID);
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
if (is_privileged(request->ipc_client, op)) {
stonith_device_remove(device_id, false);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
} else {
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must delete device via CIB");
}
fenced_send_config_notification(op, &request->result, device_id);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_LEVEL_ADD
static xmlNode *
handle_level_add_request(pcmk__request_t *request)
{
char *desc = NULL;
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
if (is_privileged(request->ipc_client, op)) {
fenced_register_level(request->xml, &desc, &request->result);
} else {
unpack_level_request(request->xml, NULL, NULL, NULL, &desc);
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must add level via CIB");
}
fenced_send_config_notification(op, &request->result, desc);
free(desc);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_LEVEL_DEL
static xmlNode *
handle_level_delete_request(pcmk__request_t *request)
{
char *desc = NULL;
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
if (is_privileged(request->ipc_client, op)) {
fenced_unregister_level(request->xml, &desc, &request->result);
} else {
unpack_level_request(request->xml, NULL, NULL, NULL, &desc);
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must delete level via CIB");
}
fenced_send_config_notification(op, &request->result, desc);
free(desc);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// CRM_OP_RM_NODE_CACHE
static xmlNode *
handle_cache_request(pcmk__request_t *request)
{
int node_id = 0;
const char *name = NULL;
crm_element_value_int(request->xml, PCMK_XA_ID, &node_id);
name = crm_element_value(request->xml, PCMK_XA_UNAME);
pcmk__cluster_forget_cluster_node(node_id, name);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return NULL;
}
static xmlNode *
handle_unknown_request(pcmk__request_t *request)
{
crm_err("Unknown IPC request %s from %s %s",
request->op, pcmk__request_origin_type(request),
pcmk__request_origin(request));
pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID,
"Unknown IPC request type '%s' (bug?)", request->op);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
static void
fenced_register_handlers(void)
{
pcmk__server_command_t handlers[] = {
{ CRM_OP_REGISTER, handle_register_request },
{ STONITH_OP_EXEC, handle_agent_request },
{ STONITH_OP_TIMEOUT_UPDATE, handle_update_timeout_request },
{ STONITH_OP_QUERY, handle_query_request },
{ STONITH_OP_NOTIFY, handle_notify_request },
{ STONITH_OP_RELAY, handle_relay_request },
{ STONITH_OP_FENCE, handle_fence_request },
{ STONITH_OP_FENCE_HISTORY, handle_history_request },
{ STONITH_OP_DEVICE_ADD, handle_device_add_request },
{ STONITH_OP_DEVICE_DEL, handle_device_delete_request },
{ STONITH_OP_LEVEL_ADD, handle_level_add_request },
{ STONITH_OP_LEVEL_DEL, handle_level_delete_request },
{ CRM_OP_RM_NODE_CACHE, handle_cache_request },
{ NULL, handle_unknown_request },
};
fenced_handlers = pcmk__register_handlers(handlers);
}
void
fenced_unregister_handlers(void)
{
if (fenced_handlers != NULL) {
g_hash_table_destroy(fenced_handlers);
fenced_handlers = NULL;
}
}
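/*!
 * \internal
 * \brief Process an incoming fencer request, send any reply, and log the result
 *
 * \param[in,out] request  Request to process
 */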
static void
handle_request(pcmk__request_t *request)
{
xmlNode *reply = NULL;
const char *reason = NULL;
if (fenced_handlers == NULL) {
fenced_register_handlers();
}
reply = pcmk__process_request(request, fenced_handlers);
if (reply != NULL) {
if (pcmk_is_set(request->flags, pcmk__request_reuse_options)
&& (request->ipc_client != NULL)) {
/* Certain IPC-only commands must reuse the call options from the
* original request rather than the ones set by stonith_send_reply()
* -> do_local_reply().
*/
pcmk__ipc_send_xml(request->ipc_client, request->ipc_id, reply,
request->ipc_flags);
request->ipc_client->request_id = 0;
} else {
stonith_send_reply(reply, request->call_options,
request->peer, request->ipc_client);
}
free_xml(reply);
}
reason = request->result.exit_reason;
crm_debug("Processed %s request from %s %s: %s%s%s%s",
request->op, pcmk__request_origin_type(request),
pcmk__request_origin(request),
pcmk_exec_status_str(request->result.execution_status),
(reason == NULL)? "" : " (",
(reason == NULL)? "" : reason,
(reason == NULL)? "" : ")");
}
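/*!
 * \internal
 * \brief Handle a reply to a fencing query or operation
 *
 * \param[in,out] client       If not NULL, IPC client that sent reply
 * \param[in,out] request      Reply XML
 * \param[in]     remote_peer  If not NULL, name of peer node that sent reply
 */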
static void
handle_reply(pcmk__client_t *client, xmlNode *request, const char *remote_peer)
{
// Copy, because request might be freed before we want to log this
char *op = crm_element_value_copy(request, PCMK__XA_ST_OP);
if (pcmk__str_eq(op, STONITH_OP_QUERY, pcmk__str_none)) {
process_remote_stonith_query(request);
} else if (pcmk__str_any_of(op, STONITH_OP_NOTIFY, STONITH_OP_FENCE,
NULL)) {
fenced_process_fencing_reply(request);
} else {
crm_err("Ignoring unknown %s reply from %s %s",
pcmk__s(op, "untyped"), ((client == NULL)? "peer" : "client"),
((client == NULL)? remote_peer : pcmk__client_name(client)));
crm_log_xml_warn(request, "UnknownOp");
free(op);
return;
}
crm_debug("Processed %s reply from %s %s",
op, ((client == NULL)? "peer" : "client"),
((client == NULL)? remote_peer : pcmk__client_name(client)));
free(op);
}
/*!
* \internal
* \brief Handle a message from an IPC client or CPG peer
*
* \param[in,out] client If not NULL, IPC client that sent message
* \param[in] id If from IPC client, IPC message ID
* \param[in] flags Message flags
* \param[in,out] message Message XML
* \param[in] remote_peer If not NULL, CPG peer that sent message
*/
void
stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags,
xmlNode *message, const char *remote_peer)
{
int call_options = st_opt_none;
bool is_reply = false;
CRM_CHECK(message != NULL, return);
if (get_xpath_object("//" PCMK__XE_ST_REPLY, message, LOG_NEVER) != NULL) {
is_reply = true;
}
crm_element_value_int(message, PCMK__XA_ST_CALLOPT, &call_options);
crm_debug("Processing %ssynchronous %s %s %u from %s %s",
pcmk_is_set(call_options, st_opt_sync_call)? "" : "a",
crm_element_value(message, PCMK__XA_ST_OP),
(is_reply? "reply" : "request"), id,
((client == NULL)? "peer" : "client"),
((client == NULL)? remote_peer : pcmk__client_name(client)));
if (pcmk_is_set(call_options, st_opt_sync_call)) {
CRM_ASSERT(client == NULL || client->request_id == id);
}
if (is_reply) {
handle_reply(client, message, remote_peer);
} else {
pcmk__request_t request = {
.ipc_client = client,
.ipc_id = id,
.ipc_flags = flags,
.peer = remote_peer,
.xml = message,
.call_options = call_options,
.result = PCMK__UNKNOWN_RESULT,
};
request.op = crm_element_value_copy(request.xml, PCMK__XA_ST_OP);
CRM_CHECK(request.op != NULL, return);
if (pcmk_is_set(request.call_options, st_opt_sync_call)) {
pcmk__set_request_flags(&request, pcmk__request_sync);
}
handle_request(&request);
pcmk__reset_request(&request);
}
}
diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c
index f87eeb6e88..0146cd1ddf 100644
--- a/daemons/fenced/fenced_remote.c
+++ b/daemons/fenced/fenced_remote.c
@@ -1,2597 +1,2597 @@
/*
* Copyright 2009-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <ctype.h>
#include <regex.h>
#include <crm/crm.h>
#include <crm/common/ipc.h>
#include <crm/common/ipc_internal.h>
#include <crm/cluster/internal.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <pacemaker-fenced.h>
#define TIMEOUT_MULTIPLY_FACTOR 1.2
/* When one fencer queries its peers for devices able to handle a fencing
* request, each peer will reply with a list of such devices available to it.
* Each reply will be parsed into a peer_device_info_t, with each device's
* information kept in a device_properties_t.
*/
typedef struct device_properties_s {
/* Whether access to this device has been verified */
gboolean verified;
/* The remaining members are indexed by the operation's "phase" */
/* Whether this device has been executed in each phase */
gboolean executed[st_phase_max];
/* Whether this device is disallowed from executing in each phase */
gboolean disallowed[st_phase_max];
/* Action-specific timeout for each phase */
int custom_action_timeout[st_phase_max];
/* Action-specific maximum random delay for each phase */
int delay_max[st_phase_max];
/* Action-specific base delay for each phase */
int delay_base[st_phase_max];
/* Group of enum st_device_flags */
uint32_t device_support_flags;
} device_properties_t;
typedef struct {
/* Name of peer that sent this result */
char *host;
/* Only try peers for non-topology based operations once */
gboolean tried;
/* Number of entries in the devices table */
int ndevices;
/* Devices available to this host that are capable of fencing the target */
GHashTable *devices;
} peer_device_info_t;
GHashTable *stonith_remote_op_list = NULL;
extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
int call_options);
static void request_peer_fencing(remote_fencing_op_t *op,
peer_device_info_t *peer);
static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup);
static void report_timeout_period(remote_fencing_op_t * op, int op_timeout);
static int get_op_total_timeout(const remote_fencing_op_t *op,
const peer_device_info_t *chosen_peer);
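// GCompareFunc to sort strings in lexicographic order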
static gint
sort_strings(gconstpointer a, gconstpointer b)
{
return strcmp(a, b);
}
static void
free_remote_query(gpointer data)
{
if (data != NULL) {
peer_device_info_t *peer = data;
g_hash_table_destroy(peer->devices);
free(peer->host);
free(peer);
}
}
void
free_stonith_remote_op_list(void)
{
if (stonith_remote_op_list != NULL) {
g_hash_table_destroy(stonith_remote_op_list);
stonith_remote_op_list = NULL;
}
}
struct peer_count_data {
const remote_fencing_op_t *op;
gboolean verified_only;
uint32_t support_action_only;
int count;
};
/*!
* \internal
* \brief Increment a counter if a device has not been executed yet
*
* \param[in] key Device ID (ignored)
* \param[in] value Device properties
* \param[in,out] user_data Peer count data
*/
static void
count_peer_device(gpointer key, gpointer value, gpointer user_data)
{
device_properties_t *props = (device_properties_t*)value;
struct peer_count_data *data = user_data;
if (!props->executed[data->op->phase]
&& (!data->verified_only || props->verified)
&& ((data->support_action_only == st_device_supports_none) || pcmk_is_set(props->device_support_flags, data->support_action_only))) {
++(data->count);
}
}
/*!
* \internal
* \brief Check the number of available devices in a peer's query results
*
* \param[in] op Operation that results are for
* \param[in] peer Peer to count
* \param[in] verified_only Whether to count only verified devices
 * \param[in] support_on_action_only  Whether to count only devices that
 *                                     support the action
*
* \return Number of devices available to peer that were not already executed
*/
static int
count_peer_devices(const remote_fencing_op_t *op,
const peer_device_info_t *peer, gboolean verified_only, uint32_t support_on_action_only)
{
struct peer_count_data data;
data.op = op;
data.verified_only = verified_only;
data.support_action_only = support_on_action_only;
data.count = 0;
if (peer) {
g_hash_table_foreach(peer->devices, count_peer_device, &data);
}
return data.count;
}
/*!
* \internal
* \brief Search for a device in a query result
*
* \param[in] op Operation that result is for
* \param[in] peer Query result for a peer
 * \param[in] device               Device ID to search for
 * \param[in] support_action_only  Device support flag required of the device,
 *                                 or st_device_supports_none to match any
*
* \return Device properties if found, NULL otherwise
*/
static device_properties_t *
find_peer_device(const remote_fencing_op_t *op, const peer_device_info_t *peer,
const char *device, uint32_t support_action_only)
{
device_properties_t *props = g_hash_table_lookup(peer->devices, device);
if (props && support_action_only != st_device_supports_none && !pcmk_is_set(props->device_support_flags, support_action_only)) {
return NULL;
}
return (props && !props->executed[op->phase]
&& !props->disallowed[op->phase])? props : NULL;
}
/*!
* \internal
* \brief Find a device in a peer's device list and mark it as executed
*
* \param[in] op Operation that peer result is for
* \param[in,out] peer Peer with results to search
* \param[in] device ID of device to mark as done
* \param[in] verified_devices_only Only consider verified devices
*
* \return TRUE if device was found and marked, FALSE otherwise
*/
static gboolean
grab_peer_device(const remote_fencing_op_t *op, peer_device_info_t *peer,
const char *device, gboolean verified_devices_only)
{
device_properties_t *props = find_peer_device(op, peer, device,
fenced_support_flag(op->action));
if ((props == NULL) || (verified_devices_only && !props->verified)) {
return FALSE;
}
crm_trace("Removing %s from %s (%d remaining)",
device, peer->host, count_peer_devices(op, peer, FALSE, st_device_supports_none));
props->executed[op->phase] = TRUE;
return TRUE;
}
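// Stop all timers associated with a remote fencing operation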
static void
clear_remote_op_timers(remote_fencing_op_t * op)
{
if (op->query_timer) {
g_source_remove(op->query_timer);
op->query_timer = 0;
}
if (op->op_timer_total) {
g_source_remove(op->op_timer_total);
op->op_timer_total = 0;
}
if (op->op_timer_one) {
g_source_remove(op->op_timer_one);
op->op_timer_one = 0;
}
}
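// Free a remote fencing operation and everything it contains (GDestroyNotify)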
static void
free_remote_op(gpointer data)
{
remote_fencing_op_t *op = data;
crm_log_xml_debug(op->request, "Destroying");
clear_remote_op_timers(op);
free(op->id);
free(op->action);
free(op->delegate);
free(op->target);
free(op->client_id);
free(op->client_name);
free(op->originator);
if (op->query_results) {
g_list_free_full(op->query_results, free_remote_query);
}
if (op->request) {
free_xml(op->request);
op->request = NULL;
}
if (op->devices_list) {
g_list_free_full(op->devices_list, free);
op->devices_list = NULL;
}
g_list_free_full(op->automatic_list, free);
g_list_free(op->duplicates);
pcmk__reset_result(&op->result);
free(op);
}
void
init_stonith_remote_op_hash_table(GHashTable **table)
{
if (*table == NULL) {
*table = pcmk__strkey_table(NULL, free_remote_op);
}
}
/*!
* \internal
* \brief Return an operation's originally requested action (before any remap)
*
* \param[in] op Operation to check
*
* \return Operation's original action
*/
static const char *
op_requested_action(const remote_fencing_op_t *op)
{
return ((op->phase > st_phase_requested)? PCMK_ACTION_REBOOT : op->action);
}
/*!
* \internal
* \brief Remap a "reboot" operation to the "off" phase
*
* \param[in,out] op Operation to remap
*/
static void
op_phase_off(remote_fencing_op_t *op)
{
crm_info("Remapping multiple-device reboot targeting %s to 'off' "
CRM_XS " id=%.8s", op->target, op->id);
op->phase = st_phase_off;
/* Happily, "off" and "on" are shorter than "reboot", so we can reuse the
* memory allocation at each phase.
*/
strcpy(op->action, PCMK_ACTION_OFF);
}
/*!
* \internal
* \brief Advance a remapped reboot operation to the "on" phase
*
* \param[in,out] op Operation to remap
*/
static void
op_phase_on(remote_fencing_op_t *op)
{
GList *iter = NULL;
crm_info("Remapped 'off' targeting %s complete, "
"remapping to 'on' for %s " CRM_XS " id=%.8s",
op->target, op->client_name, op->id);
op->phase = st_phase_on;
strcpy(op->action, PCMK_ACTION_ON);
/* Skip devices with automatic unfencing, because the cluster will handle it
* when the node rejoins.
*/
for (iter = op->automatic_list; iter != NULL; iter = iter->next) {
GList *match = g_list_find_custom(op->devices_list, iter->data,
sort_strings);
if (match) {
op->devices_list = g_list_remove(op->devices_list, match->data);
}
}
g_list_free_full(op->automatic_list, free);
op->automatic_list = NULL;
/* Rewind device list pointer */
op->devices = op->devices_list;
}
/*!
* \internal
* \brief Reset a remapped reboot operation
*
* \param[in,out] op Operation to reset
*/
static void
undo_op_remap(remote_fencing_op_t *op)
{
if (op->phase > 0) {
crm_info("Undoing remap of reboot targeting %s for %s "
CRM_XS " id=%.8s", op->target, op->client_name, op->id);
op->phase = st_phase_requested;
strcpy(op->action, PCMK_ACTION_REBOOT);
}
}
/*!
* \internal
* \brief Create notification data XML for a fencing operation result
*
* \param[in,out] parent Parent XML element for newly created element
* \param[in] op Fencer operation that completed
*
* \return Newly created XML to add as notification data
* \note The caller is responsible for freeing the result.
*/
static xmlNode *
fencing_result2xml(xmlNode *parent, const remote_fencing_op_t *op)
{
xmlNode *notify_data = pcmk__xe_create(parent, PCMK__XE_ST_NOTIFY_FENCE);
crm_xml_add_int(notify_data, PCMK_XA_STATE, op->state);
crm_xml_add(notify_data, PCMK__XA_ST_TARGET, op->target);
crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ACTION, op->action);
crm_xml_add(notify_data, PCMK__XA_ST_DELEGATE, op->delegate);
crm_xml_add(notify_data, PCMK__XA_ST_REMOTE_OP, op->id);
crm_xml_add(notify_data, PCMK__XA_ST_ORIGIN, op->originator);
crm_xml_add(notify_data, PCMK__XA_ST_CLIENTID, op->client_id);
crm_xml_add(notify_data, PCMK__XA_ST_CLIENTNAME, op->client_name);
return notify_data;
}
/*!
* \internal
* \brief Broadcast a fence result notification to all CPG peers
*
* \param[in] op Fencer operation that completed
* \param[in] op_merged Whether this operation is a duplicate of another
*/
void
fenced_broadcast_op_result(const remote_fencing_op_t *op, bool op_merged)
{
static int count = 0;
xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);
xmlNode *wrapper = NULL;
xmlNode *notify_data = NULL;
count++;
crm_trace("Broadcasting result to peers");
crm_xml_add(bcast, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY);
crm_xml_add(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST);
crm_xml_add(bcast, PCMK__XA_ST_OP, STONITH_OP_NOTIFY);
crm_xml_add_int(bcast, PCMK_XA_COUNT, count);
if (op_merged) {
pcmk__xe_set_bool_attr(bcast, PCMK__XA_ST_OP_MERGED, true);
}
wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA);
notify_data = fencing_result2xml(wrapper, op);
stonith__xe_set_result(notify_data, &op->result);
pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, bcast);
free_xml(bcast);
return;
}
/*!
* \internal
* \brief Reply to a local request originator and notify all subscribed clients
*
* \param[in,out] op Fencer operation that completed
* \param[in,out] data Top-level XML to add notification to
*/
static void
handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data)
{
xmlNode *notify_data = NULL;
xmlNode *reply = NULL;
pcmk__client_t *client = NULL;
if (op->notify_sent == TRUE) {
/* nothing to do */
return;
}
/* Do notification with a clean data object */
crm_xml_add_int(data, PCMK_XA_STATE, op->state);
crm_xml_add(data, PCMK__XA_ST_TARGET, op->target);
crm_xml_add(data, PCMK__XA_ST_OP, op->action);
reply = fenced_construct_reply(op->request, data, &op->result);
crm_xml_add(reply, PCMK__XA_ST_DELEGATE, op->delegate);
/* Send fencing OP reply to local client that initiated fencing */
client = pcmk__find_client_by_id(op->client_id);
if (client == NULL) {
crm_trace("Skipping reply to %s: no longer a client", op->client_id);
} else {
do_local_reply(reply, client, op->call_options);
}
/* Broadcast to all local clients that the fencing operation happened */
notify_data = fencing_result2xml(NULL, op);
fenced_send_notification(PCMK__VALUE_ST_NOTIFY_FENCE, &op->result,
notify_data);
free_xml(notify_data);
fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL);
/* Mark this op as having its notifications already sent */
op->notify_sent = TRUE;
free_xml(reply);
}
/*!
* \internal
* \brief Finalize all duplicates of a given fencer operation
*
* \param[in,out] op Fencer operation that completed
* \param[in,out] data Top-level XML to add notification to
*/
static void
finalize_op_duplicates(remote_fencing_op_t *op, xmlNode *data)
{
for (GList *iter = op->duplicates; iter != NULL; iter = iter->next) {
remote_fencing_op_t *other = iter->data;
if (other->state == st_duplicate) {
other->state = op->state;
crm_debug("Performing duplicate notification for %s@%s: %s "
CRM_XS " id=%.8s",
other->client_name, other->originator,
pcmk_exec_status_str(op->result.execution_status),
other->id);
pcmk__copy_result(&op->result, &other->result);
finalize_op(other, data, true);
} else {
// Possible if (for example) it timed out already
crm_err("Skipping duplicate notification for %s@%s "
CRM_XS " state=%s id=%.8s",
other->client_name, other->originator,
stonith_op_state_str(other->state), other->id);
}
}
}
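/*!
* \internal
* \brief Get the delegate for a fencing result from reply XML
*
* \param[in] xml Reply XML to check
*
* \return Newly allocated copy of the delegate in the reply, or of the
*         message source if no delegate is present
*/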
static char *
delegate_from_xml(xmlNode *xml)
{
xmlNode *match = get_xpath_object("//@" PCMK__XA_ST_DELEGATE, xml,
LOG_NEVER);
if (match == NULL) {
return crm_element_value_copy(xml, PCMK__XA_SRC);
} else {
return crm_element_value_copy(match, PCMK__XA_ST_DELEGATE);
}
}
/*!
* \internal
* \brief Finalize a peer fencing operation
*
* Clean up after a fencing operation completes. This function has two code
* paths: the executioner uses it to broadcast the result to CPG peers, and then
* each peer (including the executioner) uses it to process that broadcast and
* notify its IPC clients of the result.
*
* \param[in,out] op Fencer operation that completed
* \param[in,out] data If not NULL, XML reply of last delegated operation
* \param[in] dup Whether this operation is a duplicate of another
* (in which case, do not broadcast the result)
*
* \note The operation result should be set before calling this function.
*/
static void
finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup)
{
int level = LOG_ERR;
const char *subt = NULL;
xmlNode *local_data = NULL;
gboolean op_merged = FALSE;
CRM_CHECK((op != NULL), return);
// This is a no-op if timers have already been cleared
clear_remote_op_timers(op);
if (op->notify_sent) {
// Most likely, this is a timed-out action that eventually completed
crm_notice("Operation '%s'%s%s by %s for %s@%s%s: "
"Result arrived too late " CRM_XS " id=%.8s",
op->action, (op->target? " targeting " : ""),
(op->target? op->target : ""),
(op->delegate? op->delegate : "unknown node"),
op->client_name, op->originator,
(op_merged? " (merged)" : ""),
op->id);
return;
}
set_fencing_completed(op);
undo_op_remap(op);
if (data == NULL) {
data = pcmk__xe_create(NULL, "remote-op");
local_data = data;
} else if (op->delegate == NULL) {
switch (op->result.execution_status) {
case PCMK_EXEC_NO_FENCE_DEVICE:
break;
case PCMK_EXEC_INVALID:
if (op->result.exit_status != CRM_EX_EXPIRED) {
op->delegate = delegate_from_xml(data);
}
break;
default:
op->delegate = delegate_from_xml(data);
break;
}
}
if (dup || (crm_element_value(data, PCMK__XA_ST_OP_MERGED) != NULL)) {
op_merged = true;
}
/* Tell everyone the operation is done; we will continue with the local
* notifications once we receive the broadcast back. */
subt = crm_element_value(data, PCMK__XA_SUBT);
if (!dup && !pcmk__str_eq(subt, PCMK__VALUE_BROADCAST, pcmk__str_none)) {
/* Defer notification until the bcast message arrives */
fenced_broadcast_op_result(op, op_merged);
free_xml(local_data);
return;
}
if (pcmk__result_ok(&op->result) || dup
|| !pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) {
level = LOG_NOTICE;
}
do_crm_log(level, "Operation '%s'%s%s by %s for %s@%s%s: %s (%s%s%s) "
CRM_XS " id=%.8s", op->action, (op->target? " targeting " : ""),
(op->target? op->target : ""),
(op->delegate? op->delegate : "unknown node"),
op->client_name, op->originator,
(op_merged? " (merged)" : ""),
crm_exit_str(op->result.exit_status),
pcmk_exec_status_str(op->result.execution_status),
((op->result.exit_reason == NULL)? "" : ": "),
((op->result.exit_reason == NULL)? "" : op->result.exit_reason),
op->id);
handle_local_reply_and_notify(op, data);
if (!dup) {
finalize_op_duplicates(op, data);
}
/* Free non-essential parts of the record
* Keep the record around so we can query the history
*/
if (op->query_results) {
g_list_free_full(op->query_results, free_remote_query);
op->query_results = NULL;
}
if (op->request) {
free_xml(op->request);
op->request = NULL;
}
free_xml(local_data);
}
/*!
* \internal
* \brief Finalize a watchdog fencer op after the waiting time expires
*
* \param[in,out] userdata Fencer operation that completed
*
* \return G_SOURCE_REMOVE (which tells glib not to restart timer)
*/
static gboolean
remote_op_watchdog_done(gpointer userdata)
{
remote_fencing_op_t *op = userdata;
op->op_timer_one = 0;
crm_notice("Self-fencing (%s) by %s for %s assumed complete "
CRM_XS " id=%.8s",
op->action, op->target, op->client_name, op->id);
op->state = st_done;
pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
finalize_op(op, NULL, false);
return G_SOURCE_REMOVE;
}
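/*!
* \internal
* \brief Mark a peer's fencing attempt as timed out and try the next device
*
* \param[in,out] userdata Fencer operation that timed out
*
* \return G_SOURCE_REMOVE (which tells glib not to restart timer)
*/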
static gboolean
remote_op_timeout_one(gpointer userdata)
{
remote_fencing_op_t *op = userdata;
op->op_timer_one = 0;
crm_notice("Peer's '%s' action targeting %s for client %s timed out " CRM_XS
" id=%.8s", op->action, op->target, op->client_name, op->id);
pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT,
"Peer did not return fence result within timeout");
// The requested delay has been applied for the first device
if (op->client_delay > 0) {
op->client_delay = 0;
crm_trace("Try another device for '%s' action targeting %s "
"for client %s without delay " CRM_XS " id=%.8s",
op->action, op->target, op->client_name, op->id);
}
// Try another device, if appropriate
request_peer_fencing(op, NULL);
return G_SOURCE_REMOVE;
}
/*!
* \internal
* \brief Finalize a remote fencer operation that timed out
*
* \param[in,out] op Fencer operation that timed out
* \param[in] reason Readable description of what step timed out
*/
static void
finalize_timed_out_op(remote_fencing_op_t *op, const char *reason)
{
crm_debug("Action '%s' targeting %s for client %s timed out "
CRM_XS " id=%.8s",
op->action, op->target, op->client_name, op->id);
if (op->phase == st_phase_on) {
/* A remapped reboot operation timed out in the "on" phase, but the
* "off" phase completed successfully, so quit trying any further
* devices, and return success.
*/
op->state = st_done;
pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
} else {
op->state = st_failed;
pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, reason);
}
finalize_op(op, NULL, false);
}
/*!
* \internal
* \brief Finalize a remote fencer operation that timed out
*
* \param[in,out] userdata Fencer operation that timed out
*
* \return G_SOURCE_REMOVE (which tells glib not to restart timer)
*/
static gboolean
remote_op_timeout(gpointer userdata)
{
remote_fencing_op_t *op = userdata;
op->op_timer_total = 0;
if (op->state == st_done) {
crm_debug("Action '%s' targeting %s for client %s already completed "
CRM_XS " id=%.8s",
op->action, op->target, op->client_name, op->id);
} else {
finalize_timed_out_op(userdata, "Fencing did not complete within a "
"total timeout based on the "
"configured timeout and retries for "
"any devices attempted");
}
return G_SOURCE_REMOVE;
}
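/*!
* \internal
* \brief Handle expiration of a fencing operation's query timer
*
* \param[in,out] data Fencer operation whose query timed out
*
* \return G_SOURCE_REMOVE (which tells glib not to restart timer)
*/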
static gboolean
remote_op_query_timeout(gpointer data)
{
remote_fencing_op_t *op = data;
op->query_timer = 0;
if (op->state == st_done) {
crm_debug("Operation %.8s targeting %s already completed",
op->id, op->target);
} else if (op->state == st_exec) {
crm_debug("Operation %.8s targeting %s already in progress",
op->id, op->target);
} else if (op->query_results) {
// Query succeeded, so attempt the actual fencing
crm_debug("Query %.8s targeting %s complete (state=%s)",
op->id, op->target, stonith_op_state_str(op->state));
request_peer_fencing(op, NULL);
} else {
crm_debug("Query %.8s targeting %s timed out (state=%s)",
op->id, op->target, stonith_op_state_str(op->state));
finalize_timed_out_op(op, "No capable peers replied to device query "
"within timeout");
}
return G_SOURCE_REMOVE;
}
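/*!
* \internal
* \brief Check whether a fencing topology entry has no levels defined
*
* \param[in] tp Topology table entry to check (may be NULL)
*
* \return TRUE if no levels are defined, otherwise FALSE
*/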
static gboolean
topology_is_empty(stonith_topology_t *tp)
{
int i;
if (tp == NULL) {
return TRUE;
}
- for (i = 0; i < ST_LEVEL_MAX; i++) {
+ for (i = 0; i < ST__LEVEL_COUNT; i++) {
if (tp->levels[i] != NULL) {
return FALSE;
}
}
return TRUE;
}
/*!
* \internal
* \brief Add a device to an operation's automatic unfencing list
*
* \param[in,out] op Operation to modify
* \param[in] device Device ID to add
*/
static void
add_required_device(remote_fencing_op_t *op, const char *device)
{
GList *match = g_list_find_custom(op->automatic_list, device,
sort_strings);
if (!match) {
op->automatic_list = g_list_prepend(op->automatic_list,
pcmk__str_copy(device));
}
}
/*!
* \internal
* \brief Remove a device from the automatic unfencing list
*
* \param[in,out] op Operation to modify
* \param[in] device Device ID to remove
*/
static void
remove_required_device(remote_fencing_op_t *op, const char *device)
{
GList *match = g_list_find_custom(op->automatic_list, device,
sort_strings);
if (match) {
op->automatic_list = g_list_remove(op->automatic_list, match->data);
}
}
/*!
* \internal
* \brief Set an operation's device list to a deep copy of a given list
*
* \param[in,out] op Fencer operation to modify
* \param[in] devices List of device IDs to copy
*/
static void
set_op_device_list(remote_fencing_op_t * op, GList *devices)
{
GList *lpc = NULL;
if (op->devices_list) {
g_list_free_full(op->devices_list, free);
op->devices_list = NULL;
}
for (lpc = devices; lpc != NULL; lpc = lpc->next) {
const char *device = lpc->data;
op->devices_list = g_list_append(op->devices_list,
pcmk__str_copy(device));
}
op->devices = op->devices_list;
}
/*!
* \internal
* \brief Check whether a node matches a topology target
*
* \param[in] tp Topology table entry to check
* \param[in] node Name of node to check
*
* \return TRUE if node matches topology target
*/
static gboolean
topology_matches(const stonith_topology_t *tp, const char *node)
{
regex_t r_patt;
CRM_CHECK(node && tp && tp->target, return FALSE);
switch (tp->kind) {
case fenced_target_by_attribute:
/* This level targets by attribute, so tp->target is a NAME=VALUE pair
* of a permanent attribute applied to targeted nodes. The test below
* relies on the locally cached copy of the CIB, so if fencing is needed
* before the initial CIB is received or after a malformed CIB is
* received, then the topology will not be usable.
*/
if (node_has_attr(node, tp->target_attribute, tp->target_value)) {
crm_notice("Matched %s with %s by attribute", node, tp->target);
return TRUE;
}
break;
case fenced_target_by_pattern:
/* This level targets node names matching a pattern, so tp->target
* (and tp->target_pattern) is a regular expression.
*/
if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) {
crm_info("Bad regex '%s' for fencing level", tp->target);
} else {
int status = regexec(&r_patt, node, 0, NULL, 0);
regfree(&r_patt);
if (status == 0) {
crm_notice("Matched %s with %s by name", node, tp->target);
return TRUE;
}
}
break;
case fenced_target_by_name:
crm_trace("Testing %s against %s", node, tp->target);
return pcmk__str_eq(tp->target, node, pcmk__str_casei);
default:
break;
}
crm_trace("No match for %s with %s", node, tp->target);
return FALSE;
}
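/*!
* \internal
* \brief Find the topology table entry that matches a given node
*
* \param[in] host Name of node to find an entry for
*
* \return Topology table entry matching the node by name, pattern, or
*         attribute, or NULL if none
*/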
stonith_topology_t *
find_topology_for_host(const char *host)
{
GHashTableIter tIter;
stonith_topology_t *tp = g_hash_table_lookup(topology, host);
if(tp != NULL) {
crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
return tp;
}
g_hash_table_iter_init(&tIter, topology);
while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) {
if (topology_matches(tp, host)) {
crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
return tp;
}
}
crm_trace("No matches for %s in %d topology entries", host, g_hash_table_size(topology));
return NULL;
}
/*!
* \internal
* \brief Set fencing operation's device list to target's next topology level
*
* \param[in,out] op Remote fencing operation to modify
* \param[in] empty_ok If true, an operation without a target (i.e.
* queries) or a target without a topology will get a
* pcmk_rc_ok return value instead of ENODEV
*
* \return Standard Pacemaker return value
*/
static int
advance_topology_level(remote_fencing_op_t *op, bool empty_ok)
{
stonith_topology_t *tp = NULL;
if (op->target) {
tp = find_topology_for_host(op->target);
}
if (topology_is_empty(tp)) {
return empty_ok? pcmk_rc_ok : ENODEV;
}
CRM_ASSERT(tp->levels != NULL);
stonith__set_call_options(op->call_options, op->id, st_opt_topology);
/* This is a new level, so undo any remapping left over from previous */
undo_op_remap(op);
do {
op->level++;
- } while (op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL);
+ } while (op->level < ST__LEVEL_COUNT && tp->levels[op->level] == NULL);
- if (op->level < ST_LEVEL_MAX) {
+ if (op->level < ST__LEVEL_COUNT) {
crm_trace("Attempting fencing level %d targeting %s (%d devices) "
"for client %s@%s (id=%.8s)",
op->level, op->target, g_list_length(tp->levels[op->level]),
op->client_name, op->originator, op->id);
set_op_device_list(op, tp->levels[op->level]);
// The requested delay has been applied for the first fencing level
if ((op->level > 1) && (op->client_delay > 0)) {
op->client_delay = 0;
}
if ((g_list_next(op->devices_list) != NULL)
&& pcmk__str_eq(op->action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* A reboot has been requested for a topology level with multiple
* devices. Instead of rebooting the devices sequentially, we will
* turn them all off, then turn them all on again. (Think about
* switched power outlets for redundant power supplies.)
*/
op_phase_off(op);
}
return pcmk_rc_ok;
}
crm_info("All %sfencing options targeting %s for client %s@%s failed "
CRM_XS " id=%.8s",
(stonith_watchdog_timeout_ms > 0)?"non-watchdog ":"",
op->target, op->client_name, op->originator, op->id);
return ENODEV;
}
/*!
* \internal
* \brief If fencing operation is a duplicate, merge it into the other one
*
* \param[in,out] op Fencing operation to check
*/
static void
merge_duplicates(remote_fencing_op_t *op)
{
GHashTableIter iter;
remote_fencing_op_t *other = NULL;
time_t now = time(NULL);
g_hash_table_iter_init(&iter, stonith_remote_op_list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) {
const char *other_action = op_requested_action(other);
crm_node_t *node = NULL;
if (!strcmp(op->id, other->id)) {
continue; // Don't compare against self
}
if (other->state > st_exec) {
crm_trace("%.8s not duplicate of %.8s: not in progress",
op->id, other->id);
continue;
}
if (!pcmk__str_eq(op->target, other->target, pcmk__str_casei)) {
crm_trace("%.8s not duplicate of %.8s: node %s vs. %s",
op->id, other->id, op->target, other->target);
continue;
}
if (!pcmk__str_eq(op->action, other_action, pcmk__str_none)) {
crm_trace("%.8s not duplicate of %.8s: action %s vs. %s",
op->id, other->id, op->action, other_action);
continue;
}
if (pcmk__str_eq(op->client_name, other->client_name, pcmk__str_casei)) {
crm_trace("%.8s not duplicate of %.8s: same client %s",
op->id, other->id, op->client_name);
continue;
}
if (pcmk__str_eq(other->target, other->originator, pcmk__str_casei)) {
crm_trace("%.8s not duplicate of %.8s: suicide for %s",
op->id, other->id, other->target);
continue;
}
node = pcmk__get_node(0, other->originator, NULL,
pcmk__node_search_cluster_member);
if (!fencing_peer_active(node)) {
crm_notice("Failing action '%s' targeting %s originating from "
"client %s@%s: Originator is dead " CRM_XS " id=%.8s",
other->action, other->target, other->client_name,
other->originator, other->id);
crm_trace("%.8s not duplicate of %.8s: originator dead",
op->id, other->id);
other->state = st_failed;
continue;
}
if ((other->total_timeout > 0)
&& (now > (other->total_timeout + other->created))) {
crm_trace("%.8s not duplicate of %.8s: old (%lld vs. %lld + %ds)",
op->id, other->id, (long long)now, (long long)other->created,
other->total_timeout);
continue;
}
/* There is another in-flight request to fence the same host.
* Piggyback on that instead. If it fails, so do we.
*/
other->duplicates = g_list_append(other->duplicates, op);
if (other->total_timeout == 0) {
other->total_timeout = op->total_timeout =
TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL);
crm_trace("Best guess as to timeout used for %.8s: %ds",
other->id, other->total_timeout);
}
crm_notice("Merging fencing action '%s' targeting %s originating from "
"client %s with identical request from %s@%s "
CRM_XS " original=%.8s duplicate=%.8s total_timeout=%ds",
op->action, op->target, op->client_name,
other->client_name, other->originator,
op->id, other->id, other->total_timeout);
report_timeout_period(op, other->total_timeout);
op->state = st_duplicate;
}
}
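/*!
* \internal
* \brief Count cluster peers that are active fencing participants
*
* \return Number of known cluster nodes that are active fencers
*/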
static uint32_t fencing_active_peers(void)
{
uint32_t count = 0;
crm_node_t *entry;
GHashTableIter gIter;
g_hash_table_iter_init(&gIter, crm_peer_cache);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
if(fencing_peer_active(entry)) {
count++;
}
}
return count;
}
/*!
* \internal
* \brief Process a manual confirmation of a pending fence action
*
* \param[in] client IPC client that sent confirmation
* \param[in,out] msg Request XML with manual confirmation
*
* \return Standard Pacemaker return code
*/
int
fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg)
{
remote_fencing_op_t *op = NULL;
xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_ERR);
CRM_CHECK(dev != NULL, return EPROTO);
crm_notice("Received manual confirmation that %s has been fenced",
pcmk__s(crm_element_value(dev, PCMK__XA_ST_TARGET),
"unknown target"));
op = initiate_remote_stonith_op(client, msg, TRUE);
if (op == NULL) {
return EPROTO;
}
op->state = st_done;
set_fencing_completed(op);
op->delegate = pcmk__str_copy("a human");
// For the fencer's purposes, the fencing operation is done
pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
finalize_op(op, msg, false);
/* For the requester's purposes, the operation is still pending. The
* actual result will be sent asynchronously via the operation's done_cb().
*/
return EINPROGRESS;
}
/*!
* \internal
* \brief Create a new remote stonith operation
*
* \param[in] client ID of local stonith client that initiated the operation
* \param[in] request The request from the client that started the operation
* \param[in] peer TRUE if this operation is owned by another stonith peer
* (an operation owned by one peer is stored on all peers,
* but only the owner executes it; all nodes get the results
* once the owner finishes execution)
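*
* \return Newly created fencing operation, or the existing operation if a
*         peer-owned operation with the same ID was already created
*         (NULL on error)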
*/
void *
create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer)
{
remote_fencing_op_t *op = NULL;
xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request,
LOG_NEVER);
int call_options = 0;
const char *operation = NULL;
init_stonith_remote_op_hash_table(&stonith_remote_op_list);
/* If this operation is owned by another node, check to make
* sure we haven't already created this operation. */
if (peer && dev) {
const char *op_id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP);
CRM_CHECK(op_id != NULL, return NULL);
op = g_hash_table_lookup(stonith_remote_op_list, op_id);
if (op) {
crm_debug("Reusing existing remote fencing op %.8s for %s",
op_id, ((client == NULL)? "unknown client" : client));
return op;
}
}
op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t));
crm_element_value_int(request, PCMK__XA_ST_TIMEOUT, &(op->base_timeout));
// Value -1 means disable any static/random fencing delays
crm_element_value_int(request, PCMK__XA_ST_DELAY, &(op->client_delay));
if (peer && dev) {
op->id = crm_element_value_copy(dev, PCMK__XA_ST_REMOTE_OP);
} else {
op->id = crm_generate_uuid();
}
g_hash_table_replace(stonith_remote_op_list, op->id, op);
op->state = st_query;
op->replies_expected = fencing_active_peers();
op->action = crm_element_value_copy(dev, PCMK__XA_ST_DEVICE_ACTION);
/* The node initiating the stonith operation. If an operation is relayed,
* this is the last node the operation lands on. When in standalone mode,
* origin is the ID of the client that originated the operation.
*
* Or may be the name of the function that created the operation.
*/
op->originator = crm_element_value_copy(dev, PCMK__XA_ST_ORIGIN);
if (op->originator == NULL) {
/* Local or relayed request */
op->originator = pcmk__str_copy(stonith_our_uname);
}
// Delegate may not be set
op->delegate = crm_element_value_copy(dev, PCMK__XA_ST_DELEGATE);
op->created = time(NULL);
CRM_LOG_ASSERT(client != NULL);
op->client_id = pcmk__str_copy(client);
/* For a relayed operation, use the fencer itself as the client name. */
operation = crm_element_value(request, PCMK__XA_ST_OP);
if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) {
op->client_name = crm_strdup_printf("%s.%lu", crm_system_name,
(unsigned long) getpid());
} else {
op->client_name = crm_element_value_copy(request,
PCMK__XA_ST_CLIENTNAME);
}
op->target = crm_element_value_copy(dev, PCMK__XA_ST_TARGET);
// @TODO Figure out how to avoid copying XML here
op->request = pcmk__xml_copy(NULL, request);
crm_element_value_int(request, PCMK__XA_ST_CALLOPT, &call_options);
op->call_options = call_options;
crm_element_value_int(request, PCMK__XA_ST_CALLID, &(op->client_callid));
crm_trace("%s new fencing op %s ('%s' targeting %s for client %s, "
"base timeout %ds, %u %s expected)",
(peer && dev)? "Recorded" : "Generated", op->id, op->action,
op->target, op->client_name, op->base_timeout,
op->replies_expected,
pcmk__plural_alt(op->replies_expected, "reply", "replies"));
if (op->call_options & st_opt_cs_nodeid) {
int nodeid;
crm_node_t *node;
pcmk__scan_min_int(op->target, &nodeid, 0);
node = pcmk__search_node_caches(nodeid, NULL,
pcmk__node_search_any
|pcmk__node_search_cluster_cib);
/* Ensure the conversion only happens once */
stonith__clear_call_options(op->call_options, op->id, st_opt_cs_nodeid);
if (node && node->uname) {
pcmk__str_update(&(op->target), node->uname);
} else {
crm_warn("Could not expand nodeid '%s' into a host name", op->target);
}
}
/* check to see if this is a duplicate operation of another in-flight operation */
merge_duplicates(op);
if (op->state != st_duplicate) {
/* kick history readers */
fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL);
}
/* safe to trim as long as that doesn't touch pending ops */
stonith_fence_history_trim();
return op;
}
/*!
* \internal
* \brief Create a peer fencing operation from a request, and initiate it
*
* \param[in] client IPC client that made request (NULL to get from request)
* \param[in] request Request XML
* \param[in] manual_ack Whether this is a manual action confirmation
*
* \return Newly created operation on success, otherwise NULL
*/
remote_fencing_op_t *
initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request,
gboolean manual_ack)
{
int query_timeout = 0;
xmlNode *query = NULL;
const char *client_id = NULL;
remote_fencing_op_t *op = NULL;
const char *relay_op_id = NULL;
const char *operation = NULL;
if (client) {
client_id = client->id;
} else {
client_id = crm_element_value(request, PCMK__XA_ST_CLIENTID);
}
CRM_LOG_ASSERT(client_id != NULL);
op = create_remote_stonith_op(client_id, request, FALSE);
op->owner = TRUE;
if (manual_ack) {
return op;
}
CRM_CHECK(op->action, return NULL);
if (advance_topology_level(op, true) != pcmk_rc_ok) {
op->state = st_failed;
}
switch (op->state) {
case st_failed:
// advance_topology_level() exhausted levels
pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_ERROR,
"All topology levels failed");
crm_warn("Could not request peer fencing (%s) targeting %s "
CRM_XS " id=%.8s", op->action, op->target, op->id);
finalize_op(op, NULL, false);
return op;
case st_duplicate:
crm_info("Requesting peer fencing (%s) targeting %s (duplicate) "
CRM_XS " id=%.8s", op->action, op->target, op->id);
return op;
default:
crm_notice("Requesting peer fencing (%s) targeting %s "
CRM_XS " id=%.8s state=%s base_timeout=%ds",
op->action, op->target, op->id,
stonith_op_state_str(op->state), op->base_timeout);
}
query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY,
NULL, op->call_options);
crm_xml_add(query, PCMK__XA_ST_REMOTE_OP, op->id);
crm_xml_add(query, PCMK__XA_ST_TARGET, op->target);
crm_xml_add(query, PCMK__XA_ST_DEVICE_ACTION, op_requested_action(op));
crm_xml_add(query, PCMK__XA_ST_ORIGIN, op->originator);
crm_xml_add(query, PCMK__XA_ST_CLIENTID, op->client_id);
crm_xml_add(query, PCMK__XA_ST_CLIENTNAME, op->client_name);
crm_xml_add_int(query, PCMK__XA_ST_TIMEOUT, op->base_timeout);
/* For a relayed operation, add the relay information to the query so that the original relayed operation can be deleted. */
operation = crm_element_value(request, PCMK__XA_ST_OP);
if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) {
relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP);
if (relay_op_id) {
crm_xml_add(query, PCMK__XA_ST_REMOTE_OP_RELAY, relay_op_id);
}
}
pcmk__cluster_send_message(NULL, crm_msg_stonith_ng, query);
free_xml(query);
query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR;
op->query_timer = g_timeout_add((1000 * query_timeout), remote_op_query_timeout, op);
return op;
}
enum find_best_peer_options {
/*! Skip checking the target peer for capable fencing devices */
FIND_PEER_SKIP_TARGET = 0x0001,
/*! Only check the target peer for capable fencing devices */
FIND_PEER_TARGET_ONLY = 0x0002,
/*! Skip peers and devices that are not verified */
FIND_PEER_VERIFIED_ONLY = 0x0004,
};
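/*!
* \internal
* \brief Check whether an operation can be handled by watchdog fencing
*
* \param[in] op Fencer operation to check
* \param[in] device Device ID to check (NULL counts as the watchdog device)
*
* \return true if watchdog fencing is enabled, the device is the watchdog
*         device (or unspecified), the action is a fencing action, and the
*         target self-fences via watchdog, otherwise false
*/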
static bool
is_watchdog_fencing(const remote_fencing_op_t *op, const char *device)
{
return (stonith_watchdog_timeout_ms > 0
// Only an explicitly different device means this is not watchdog fencing
&& pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_null_matches)
&& pcmk__is_fencing_action(op->action)
&& node_does_watchdog_fencing(op->target));
}
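/*!
* \internal
* \brief Find a peer with capable devices for an operation, per given options
*
* \param[in] device Device ID to look for (NULL for any)
* \param[in,out] op Fencer operation to find a peer for
* \param[in] options Group of enum find_best_peer_options flags
*
* \return Query result of a suitable peer if found, otherwise NULL
*/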
static peer_device_info_t *
find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options)
{
GList *iter = NULL;
gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE;
if (!device && pcmk_is_set(op->call_options, st_opt_topology)) {
return NULL;
}
for (iter = op->query_results; iter != NULL; iter = iter->next) {
peer_device_info_t *peer = iter->data;
crm_trace("Testing result from %s targeting %s with %d device%s: %d %x",
peer->host, op->target, peer->ndevices,
pcmk__plural_s(peer->ndevices), peer->tried, options);
if ((options & FIND_PEER_SKIP_TARGET) && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
continue;
}
if ((options & FIND_PEER_TARGET_ONLY) && !pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
continue;
}
if (pcmk_is_set(op->call_options, st_opt_topology)) {
if (grab_peer_device(op, peer, device, verified_devices_only)) {
return peer;
}
} else if (!peer->tried
&& count_peer_devices(op, peer, verified_devices_only,
fenced_support_flag(op->action))) {
/* No topology: Use the current best peer */
crm_trace("Simple fencing");
return peer;
}
}
return NULL;
}
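/*!
* \internal
* \brief Choose the best peer to execute a fencing operation
*
* Prefer a verified peer other than the target, then any other peer, and
* finally the target itself (except during the "on" phase of a remapped
* reboot), advancing through topology levels as needed.
*
* \param[in,out] op Fencer operation to choose a peer for
*
* \return Query result of chosen peer if any, otherwise NULL
*/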
static peer_device_info_t *
stonith_choose_peer(remote_fencing_op_t * op)
{
const char *device = NULL;
peer_device_info_t *peer = NULL;
uint32_t active = fencing_active_peers();
do {
if (op->devices) {
device = op->devices->data;
crm_trace("Checking for someone to fence (%s) %s using %s",
op->action, op->target, device);
} else {
crm_trace("Checking for someone to fence (%s) %s",
op->action, op->target);
}
/* Best choice is a peer other than the target with verified access */
peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY);
if (peer) {
crm_trace("Found verified peer %s for %s", peer->host, device?device:"<any>");
return peer;
}
if(op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) {
crm_trace("Waiting before looking for unverified devices to fence %s", op->target);
return NULL;
}
/* If no other peer has verified access, next best is unverified access */
peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET);
if (peer) {
crm_trace("Found best unverified peer %s", peer->host);
return peer;
}
/* If no other peer can do it, last option is self-fencing
* (which is never allowed for the "on" phase of a remapped reboot)
*/
if (op->phase != st_phase_on) {
peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY);
if (peer) {
crm_trace("%s will fence itself", peer->host);
return peer;
}
}
/* Try the next fencing level if there is one (unless we're in the "on"
* phase of a remapped "reboot", because we ignore errors in that case)
*/
} while ((op->phase != st_phase_on)
&& pcmk_is_set(op->call_options, st_opt_topology)
&& (advance_topology_level(op, false) == pcmk_rc_ok));
/* With a simple watchdog fencing configuration without a topology,
* "device" is NULL here. Assume watchdog fencing should be used in that
* case.
*/
if (is_watchdog_fencing(op, device)) {
crm_info("Couldn't contact watchdog-fencing target-node (%s)",
op->target);
/* check_watchdog_fencing_and_wait will log additional info */
} else {
crm_notice("Couldn't find anyone to fence (%s) %s using %s",
op->action, op->target, (device? device : "any device"));
}
return NULL;
}
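/*!
* \internal
* \brief Ensure a fencing timeout is not shorter than the watchdog timeout
*
* \param[in] specified_timeout Configured timeout to check (in seconds)
* \param[in] action_specific Whether the timeout is action-specific
*                            (affects log message only)
* \param[in] op Fencer operation the timeout applies to
* \param[in] device Device ID the timeout applies to (NULL for none)
*
* \return The specified timeout, raised to the watchdog timeout if this is
*         watchdog fencing and the specified value is lower
*/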
static int
valid_fencing_timeout(int specified_timeout, bool action_specific,
const remote_fencing_op_t *op, const char *device)
{
int timeout = specified_timeout;
if (!is_watchdog_fencing(op, device)) {
return timeout;
}
timeout = (int) QB_MIN(QB_MAX(specified_timeout,
stonith_watchdog_timeout_ms / 1000), INT_MAX);
if (timeout > specified_timeout) {
if (action_specific) {
crm_warn("pcmk_%s_timeout %ds for %s is too short (must be >= "
PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds "
"instead",
op->action, specified_timeout, device? device : "watchdog",
timeout, timeout);
} else {
crm_warn("Fencing timeout %ds is too short (must be >= "
PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds "
"instead",
specified_timeout, timeout, timeout);
}
}
return timeout;
}
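/*!
* \internal
* \brief Get the timeout to use for a device in an operation's current phase
*
* \param[in] op Fencer operation to get timeout for
* \param[in] peer Peer that would execute the device (if known)
* \param[in] device Device ID to get timeout for (if known)
* \param[in] with_delay Whether to include any static/random fencing delay
*
* \return Timeout (in seconds) to use for the device
*/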
static int
get_device_timeout(const remote_fencing_op_t *op,
const peer_device_info_t *peer, const char *device,
bool with_delay)
{
int timeout = op->base_timeout;
device_properties_t *props;
timeout = valid_fencing_timeout(op->base_timeout, false, op, device);
if (!peer || !device) {
return timeout;
}
props = g_hash_table_lookup(peer->devices, device);
if (!props) {
return timeout;
}
if (props->custom_action_timeout[op->phase]) {
timeout = valid_fencing_timeout(props->custom_action_timeout[op->phase],
true, op, device);
}
// op->client_delay < 0 means disable any static/random fencing delays
if (with_delay && (op->client_delay >= 0)) {
// delay_base is eventually limited by delay_max
timeout += (props->delay_max[op->phase] > 0 ?
props->delay_max[op->phase] : props->delay_base[op->phase]);
}
return timeout;
}
struct timeout_data {
const remote_fencing_op_t *op;
const peer_device_info_t *peer;
int total_timeout;
};
/*!
* \internal
* \brief Add timeout to a total if device has not been executed yet
*
* \param[in] key GHashTable key (device ID)
* \param[in] value GHashTable value (device properties)
* \param[in,out] user_data Timeout data
*/
static void
add_device_timeout(gpointer key, gpointer value, gpointer user_data)
{
const char *device_id = key;
device_properties_t *props = value;
struct timeout_data *timeout = user_data;
if (!props->executed[timeout->op->phase]
&& !props->disallowed[timeout->op->phase]) {
timeout->total_timeout += get_device_timeout(timeout->op, timeout->peer,
device_id, true);
}
}
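/*!
* \internal
* \brief Get the total timeout for all of a peer's unexecuted devices
*
* \param[in] op Fencer operation to get timeout for
* \param[in] peer Peer whose device timeouts should be totaled
*
* \return Sum of timeouts for the peer's remaining devices, or the
*         operation's base timeout if that sum is zero
*/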
static int
get_peer_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer)
{
struct timeout_data timeout;
timeout.op = op;
timeout.peer = peer;
timeout.total_timeout = 0;
g_hash_table_foreach(peer->devices, add_device_timeout, &timeout);
return (timeout.total_timeout? timeout.total_timeout : op->base_timeout);
}
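/*!
* \internal
* \brief Calculate the total timeout needed for a fencing operation
*
* \param[in] op Fencer operation to calculate total timeout for
* \param[in] chosen_peer Peer chosen to execute the operation (if any)
*
* \return Total timeout (in seconds) covering all devices that might need to
*         be attempted, plus any requested client delay
*/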
static int
get_op_total_timeout(const remote_fencing_op_t *op,
const peer_device_info_t *chosen_peer)
{
long long total_timeout = 0;
stonith_topology_t *tp = find_topology_for_host(op->target);
if (pcmk_is_set(op->call_options, st_opt_topology) && tp) {
int i;
GList *device_list = NULL;
GList *iter = NULL;
GList *auto_list = NULL;
if (pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)
&& (op->automatic_list != NULL)) {
auto_list = g_list_copy(op->automatic_list);
}
/* Yep, this looks scary: nested loops all over the place.
* Here is what is going on.
* Loop1: Iterate through fencing levels.
* Loop2: If a fencing level has devices, loop through each device.
* Loop3: For each device in a fencing level, see which peer owns it
* and what timeout that peer has reported for the device.
*/
- for (i = 0; i < ST_LEVEL_MAX; i++) {
+ for (i = 0; i < ST__LEVEL_COUNT; i++) {
if (!tp->levels[i]) {
continue;
}
for (device_list = tp->levels[i]; device_list; device_list = device_list->next) {
bool found = false;
for (iter = op->query_results; iter != NULL; iter = iter->next) {
const peer_device_info_t *peer = iter->data;
if (auto_list) {
GList *match = g_list_find_custom(auto_list, device_list->data,
sort_strings);
if (match) {
auto_list = g_list_remove(auto_list, match->data);
}
}
if (find_peer_device(op, peer, device_list->data,
fenced_support_flag(op->action))) {
total_timeout += get_device_timeout(op, peer,
device_list->data,
true);
found = true;
break;
}
} /* End Loop3: match device with peer that owns device, find device's timeout period */
/* In the case of a watchdog device, add its timeout to the budget
* if we didn't get a reply for it.
*/
if (!found && is_watchdog_fencing(op, device_list->data)) {
total_timeout += stonith_watchdog_timeout_ms / 1000;
}
} /* End Loop2: iterate through devices at a specific level */
} /*End Loop1: iterate through fencing levels */
// Add timeouts for any remaining automatic unfencing devices not counted above
if (auto_list) {
for (iter = auto_list; iter != NULL; iter = iter->next) {
GList *iter2 = NULL;
for (iter2 = op->query_results; iter2 != NULL; iter2 = iter2->next) {
peer_device_info_t *peer = iter2->data;
if (find_peer_device(op, peer, iter->data, st_device_supports_on)) {
total_timeout += get_device_timeout(op, peer,
iter->data, true);
break;
}
}
}
}
g_list_free(auto_list);
} else if (chosen_peer) {
total_timeout = get_peer_timeout(op, chosen_peer);
} else {
total_timeout = valid_fencing_timeout(op->base_timeout, false, op,
NULL);
}
if (total_timeout <= 0) {
total_timeout = op->base_timeout;
}
/* Take any requested fencing delay into account to prevent it from eating
* up the total timeout.
*/
if (op->client_delay > 0) {
total_timeout += op->client_delay;
}
return (int) QB_MIN(total_timeout, INT_MAX);
}
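/*!
* \internal
* \brief Report an operation's total timeout to the requesting client
*
* \param[in,out] op Fencer operation to report timeout for
* \param[in] op_timeout Total timeout (in seconds) to report
*/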
static void
report_timeout_period(remote_fencing_op_t * op, int op_timeout)
{
GList *iter = NULL;
xmlNode *update = NULL;
const char *client_node = NULL;
const char *client_id = NULL;
const char *call_id = NULL;
if (op->call_options & st_opt_sync_call) {
/* There is no reason to report the timeout for a synchronous call, because
* the client is blocked waiting for the response and cannot act on a
* reported timeout. This update matters only for asynchronous calls that
* use a callback to report results. */
return;
} else if (!op->request) {
return;
}
crm_trace("Reporting timeout for %s (id=%.8s)", op->client_name, op->id);
client_node = crm_element_value(op->request, PCMK__XA_ST_CLIENTNODE);
call_id = crm_element_value(op->request, PCMK__XA_ST_CALLID);
client_id = crm_element_value(op->request, PCMK__XA_ST_CLIENTID);
if (!client_node || !call_id || !client_id) {
return;
}
if (pcmk__str_eq(client_node, stonith_our_uname, pcmk__str_casei)) {
// Client is connected to this node, so send update directly to them
do_stonith_async_timeout_update(client_id, call_id, op_timeout);
return;
}
/* The client is connected to another node, relay this update to them */
update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0);
crm_xml_add(update, PCMK__XA_ST_REMOTE_OP, op->id);
crm_xml_add(update, PCMK__XA_ST_CLIENTID, client_id);
crm_xml_add(update, PCMK__XA_ST_CALLID, call_id);
crm_xml_add_int(update, PCMK__XA_ST_TIMEOUT, op_timeout);
pcmk__cluster_send_message(pcmk__get_node(0, client_node, NULL,
pcmk__node_search_cluster_member),
crm_msg_stonith_ng, update);
free_xml(update);
for (iter = op->duplicates; iter != NULL; iter = iter->next) {
remote_fencing_op_t *dup = iter->data;
crm_trace("Reporting timeout for duplicate %.8s to client %s",
dup->id, dup->client_name);
report_timeout_period(iter->data, op_timeout);
}
}
/*!
* \internal
* \brief Advance an operation to the next device in its topology
*
* \param[in,out] op Fencer operation to advance
* \param[in] device ID of device that just completed
* \param[in,out] msg If not NULL, XML reply of last delegated operation
*/
static void
advance_topology_device_in_level(remote_fencing_op_t *op, const char *device,
xmlNode *msg)
{
/* Advance to the next device at this topology level, if any */
if (op->devices) {
op->devices = op->devices->next;
}
/* Handle automatic unfencing if an "on" action was requested */
if ((op->phase == st_phase_requested)
&& pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)) {
/* If the device we just executed was required, it's not anymore */
remove_required_device(op, device);
/* If there are no more devices at this topology level, run through any
* remaining devices with automatic unfencing
*/
if (op->devices == NULL) {
op->devices = op->automatic_list;
}
}
if ((op->devices == NULL) && (op->phase == st_phase_off)) {
/* We're done with this level and with required devices, but we had
* remapped "reboot" to "off", so start over with "on". If any devices
* need to be turned back on, op->devices will be non-NULL after this.
*/
op_phase_on(op);
}
// This function is only called if the previous device succeeded
pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
if (op->devices) {
/* Necessary devices remain, so execute the next one */
crm_trace("Next targeting %s on behalf of %s@%s",
op->target, op->client_name, op->originator);
// The requested delay has been applied for the first device
if (op->client_delay > 0) {
op->client_delay = 0;
}
request_peer_fencing(op, NULL);
} else {
/* We're done with all devices and phases, so finalize operation */
crm_trace("Marking complex fencing op targeting %s as complete",
op->target);
op->state = st_done;
finalize_op(op, msg, false);
}
}
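/*!
* \internal
* \brief If the target self-fences by watchdog, start waiting for completion
*
* \param[in,out] op Fencer operation that might use watchdog fencing
*
* \return TRUE if the target does watchdog fencing (in which case a timer is
*         started to assume completion after the watchdog timeout),
*         otherwise FALSE
*/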
static gboolean
check_watchdog_fencing_and_wait(remote_fencing_op_t * op)
{
if (node_does_watchdog_fencing(op->target)) {
guint timeout_ms = QB_MIN(stonith_watchdog_timeout_ms, UINT_MAX);
crm_notice("Waiting %s for %s to self-fence (%s) for "
"client %s " CRM_XS " id=%.8s",
pcmk__readable_interval(timeout_ms), op->target, op->action,
op->client_name, op->id);
if (op->op_timer_one) {
g_source_remove(op->op_timer_one);
}
op->op_timer_one = g_timeout_add(timeout_ms, remote_op_watchdog_done,
op);
return TRUE;
} else {
crm_debug("Skipping fallback to watchdog-fencing as %s is "
"not in host-list", op->target);
}
return FALSE;
}
/*!
* \internal
* \brief Ask a peer to execute a fencing operation
*
* \param[in,out] op Fencing operation to be executed
* \param[in,out] peer If NULL or topology is in use, choose best peer to
* execute the fencing, otherwise use this peer
*/
static void
request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer)
{
const char *device = NULL;
int timeout;
CRM_CHECK(op != NULL, return);
crm_trace("Action %.8s targeting %s for %s is %s",
op->id, op->target, op->client_name,
stonith_op_state_str(op->state));
if ((op->phase == st_phase_on) && (op->devices != NULL)) {
/* We are in the "on" phase of a remapped topology reboot. If this
* device has pcmk_reboot_action="off", or doesn't support the "on"
* action, skip it.
*
* We can't check device properties at this point because we haven't
* chosen a peer for this stage yet. Instead, we check the local node's
* knowledge about the device. If different versions of the fence agent
* are installed on different nodes, there's a chance this could be
* mistaken, but the worst that could happen is we don't try turning the
* node back on when we should.
*/
device = op->devices->data;
if (pcmk__str_eq(fenced_device_reboot_action(device), PCMK_ACTION_OFF,
pcmk__str_none)) {
crm_info("Not turning %s back on using %s because the device is "
"configured to stay off (pcmk_reboot_action='off')",
op->target, device);
advance_topology_device_in_level(op, device, NULL);
return;
}
if (!fenced_device_supports_on(device)) {
crm_info("Not turning %s back on using %s because the agent "
"doesn't support 'on'", op->target, device);
advance_topology_device_in_level(op, device, NULL);
return;
}
}
timeout = op->base_timeout;
if ((peer == NULL) && !pcmk_is_set(op->call_options, st_opt_topology)) {
peer = stonith_choose_peer(op);
}
if (!op->op_timer_total) {
op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, peer);
op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op);
report_timeout_period(op, op->total_timeout);
crm_info("Total timeout set to %ds for peer's fencing targeting %s for %s"
CRM_XS "id=%.8s",
op->total_timeout, op->target, op->client_name, op->id);
}
if (pcmk_is_set(op->call_options, st_opt_topology) && op->devices) {
/* Ignore the caller's peer preference if topology is in use, because
* that peer might not have access to the required device. With
* topology, stonith_choose_peer() removes the device from further
* consideration, so the timeout must be calculated beforehand.
*
* @TODO Basing the total timeout on the caller's preferred peer (above)
* is less than ideal.
*/
peer = stonith_choose_peer(op);
device = op->devices->data;
/* Fencing timeout sent to peer takes no delay into account.
* The peer will add a dedicated timer for any delay upon
* schedule_stonith_command().
*/
timeout = get_device_timeout(op, peer, device, false);
}
if (peer) {
int timeout_one = 0;
xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0);
const crm_node_t *peer_node =
pcmk__get_node(0, peer->host, NULL,
pcmk__node_search_cluster_member);
if (op->client_delay > 0) {
/* Take requested fencing delay into account to prevent it from
* eating up the timeout.
*/
timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay;
}
crm_xml_add(remote_op, PCMK__XA_ST_REMOTE_OP, op->id);
crm_xml_add(remote_op, PCMK__XA_ST_TARGET, op->target);
crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ACTION, op->action);
crm_xml_add(remote_op, PCMK__XA_ST_ORIGIN, op->originator);
crm_xml_add(remote_op, PCMK__XA_ST_CLIENTID, op->client_id);
crm_xml_add(remote_op, PCMK__XA_ST_CLIENTNAME, op->client_name);
crm_xml_add_int(remote_op, PCMK__XA_ST_TIMEOUT, timeout);
crm_xml_add_int(remote_op, PCMK__XA_ST_CALLOPT, op->call_options);
crm_xml_add_int(remote_op, PCMK__XA_ST_DELAY, op->client_delay);
if (device) {
timeout_one += TIMEOUT_MULTIPLY_FACTOR *
get_device_timeout(op, peer, device, true);
crm_notice("Requesting that %s perform '%s' action targeting %s "
"using %s " CRM_XS " for client %s (%ds)",
peer->host, op->action, op->target, device,
op->client_name, timeout_one);
crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ID, device);
} else {
timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer);
crm_notice("Requesting that %s perform '%s' action targeting %s "
CRM_XS " for client %s (%ds, %s)",
peer->host, op->action, op->target, op->client_name,
timeout_one,
pcmk__readable_interval(stonith_watchdog_timeout_ms));
}
op->state = st_exec;
if (op->op_timer_one) {
g_source_remove(op->op_timer_one);
op->op_timer_one = 0;
}
if (!is_watchdog_fencing(op, device)
|| !check_watchdog_fencing_and_wait(op)) {
/* Some thoughts about self-fencing cases reaching this point:
* - The check in check_watchdog_fencing_and_wait() shouldn't fail if
* STONITH_WATCHDOG_ID was chosen as the fencing device, because its
* presence implies that watchdog fencing is enabled anyway.
* - If watchdog fencing is disabled, either in general or for a specific
* target (detected in check_watchdog_fencing_and_wait()), then for other
* kinds of self-fencing we can't expect a success reply, but timing out
* is fine as long as the node doesn't come back in the meantime.
* - The delicate case is when watchdog fencing is enabled for a node but
* the watchdog fencing device isn't explicitly chosen for self-fencing.
* Local scheduler execution in sbd may detect the node as unclean and
* lead to timely self-fencing. Otherwise, the choice of
* PCMK_OPT_STONITH_WATCHDOG_TIMEOUT is at least questionable.
*/
/* We are not waiting for the watchdog timeout here, so start the
* per-attempt timer with the timeout evaluated above. */
op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op);
}
pcmk__cluster_send_message(peer_node, crm_msg_stonith_ng, remote_op);
peer->tried = TRUE;
free_xml(remote_op);
return;
} else if (op->phase == st_phase_on) {
/* A remapped "on" cannot be executed, but the node was already
* turned off successfully, so ignore the error and continue.
*/
crm_warn("Ignoring %s 'on' failure (no capable peers) targeting %s "
"after successful 'off'", device, op->target);
advance_topology_device_in_level(op, device, NULL);
return;
} else if (op->owner == FALSE) {
crm_err("Fencing (%s) targeting %s for client %s is not ours to control",
op->action, op->target, op->client_name);
} else if (op->query_timer == 0) {
/* We've exhausted all available peers */
crm_info("No remaining peers capable of fencing (%s) %s for client %s "
CRM_XS " state=%s", op->action, op->target, op->client_name,
stonith_op_state_str(op->state));
CRM_CHECK(op->state < st_done, return);
finalize_timed_out_op(op, "All nodes failed, or are unable, to "
"fence target");
} else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) {
/* if the operation never left the query state,
* but we have all the expected replies, then no devices
* are available to execute the fencing operation. */
if (is_watchdog_fencing(op, device)
&& check_watchdog_fencing_and_wait(op)) {
/* Consider watchdog fencing targeting an offline node to be executing
* once it starts waiting for the target to self-fence, so that when the
* query timer pops, remote_op_query_timeout() treats the fencing as
* already in progress.
*/
op->state = st_exec;
return;
}
if (op->state == st_query) {
crm_info("No peers (out of %d) have devices capable of fencing "
"(%s) %s for client %s " CRM_XS " state=%s",
op->replies, op->action, op->target, op->client_name,
stonith_op_state_str(op->state));
pcmk__reset_result(&op->result);
pcmk__set_result(&op->result, CRM_EX_ERROR,
PCMK_EXEC_NO_FENCE_DEVICE, NULL);
} else {
if (pcmk_is_set(op->call_options, st_opt_topology)) {
pcmk__reset_result(&op->result);
pcmk__set_result(&op->result, CRM_EX_ERROR,
PCMK_EXEC_NO_FENCE_DEVICE, NULL);
}
/* ... else use existing result from previous failed attempt
* (topology is not in use, and no devices remain to be attempted).
* Overwriting the result with PCMK_EXEC_NO_FENCE_DEVICE would
* prevent finalize_op() from setting the correct delegate if
* needed.
*/
crm_info("No peers (out of %d) are capable of fencing (%s) %s "
"for client %s " CRM_XS " state=%s",
op->replies, op->action, op->target, op->client_name,
stonith_op_state_str(op->state));
}
op->state = st_failed;
finalize_op(op, NULL, false);
} else {
crm_info("Waiting for additional peers capable of fencing (%s) %s%s%s "
"for client %s " CRM_XS " id=%.8s",
op->action, op->target, (device? " using " : ""),
(device? device : ""), op->client_name, op->id);
}
}
/*!
* \internal
* \brief Comparison function for sorting query results
*
* \param[in] a GList item to compare
* \param[in] b GList item to compare
*
* \return Per the glib documentation, "a negative integer if the first value
* comes before the second, 0 if they are equal, or a positive integer
* if the first value comes after the second."
*/
static gint
sort_peers(gconstpointer a, gconstpointer b)
{
const peer_device_info_t *peer_a = a;
const peer_device_info_t *peer_b = b;
return (peer_b->ndevices - peer_a->ndevices);
}
/*!
* \internal
* \brief Determine if all the devices in the topology are found or not
*
* \param[in] op Fencing operation with topology to check
*/
static gboolean
all_topology_devices_found(const remote_fencing_op_t *op)
{
GList *device = NULL;
GList *iter = NULL;
device_properties_t *match = NULL;
stonith_topology_t *tp = NULL;
gboolean skip_target = FALSE;
int i;
tp = find_topology_for_host(op->target);
if (!tp) {
return FALSE;
}
if (pcmk__is_fencing_action(op->action)) {
/* Don't count the devices on the target node if we are killing
* the target node. */
skip_target = TRUE;
}
- for (i = 0; i < ST_LEVEL_MAX; i++) {
+ for (i = 0; i < ST__LEVEL_COUNT; i++) {
for (device = tp->levels[i]; device; device = device->next) {
match = NULL;
for (iter = op->query_results; iter && !match; iter = iter->next) {
peer_device_info_t *peer = iter->data;
if (skip_target && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
continue;
}
match = find_peer_device(op, peer, device->data, st_device_supports_none);
}
if (!match) {
return FALSE;
}
}
}
return TRUE;
}
/*!
* \internal
* \brief Parse action-specific device properties from XML
*
* \param[in] xml XML element containing the properties
* \param[in] peer Name of peer that sent XML (for logs)
* \param[in] device Device ID (for logs)
* \param[in] action Action the properties relate to (for logs)
* \param[in,out] op Fencing operation that properties are being parsed for
* \param[in] phase Phase the properties relate to
* \param[in,out] props Device properties to update
*/
static void
parse_action_specific(const xmlNode *xml, const char *peer, const char *device,
const char *action, remote_fencing_op_t *op,
enum st_remap_phase phase, device_properties_t *props)
{
props->custom_action_timeout[phase] = 0;
crm_element_value_int(xml, PCMK__XA_ST_ACTION_TIMEOUT,
&props->custom_action_timeout[phase]);
if (props->custom_action_timeout[phase]) {
crm_trace("Peer %s with device %s returned %s action timeout %ds",
peer, device, action, props->custom_action_timeout[phase]);
}
props->delay_max[phase] = 0;
crm_element_value_int(xml, PCMK__XA_ST_DELAY_MAX, &props->delay_max[phase]);
if (props->delay_max[phase]) {
crm_trace("Peer %s with device %s returned maximum of random delay %ds for %s",
peer, device, props->delay_max[phase], action);
}
props->delay_base[phase] = 0;
crm_element_value_int(xml, PCMK__XA_ST_DELAY_BASE,
&props->delay_base[phase]);
if (props->delay_base[phase]) {
crm_trace("Peer %s with device %s returned base delay %ds for %s",
peer, device, props->delay_base[phase], action);
}
/* Handle devices with automatic unfencing */
if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
int required = 0;
crm_element_value_int(xml, PCMK__XA_ST_REQUIRED, &required);
if (required) {
crm_trace("Peer %s requires device %s to execute for action %s",
peer, device, action);
add_required_device(op, device);
}
}
/* If a reboot is remapped to off+on, it's possible that a node is allowed
* to perform one action but not another.
*/
if (pcmk__xe_attr_is_true(xml, PCMK__XA_ST_ACTION_DISALLOWED)) {
props->disallowed[phase] = TRUE;
crm_trace("Peer %s is disallowed from executing %s for device %s",
peer, action, device);
}
}
/*!
* \internal
* \brief Parse one device's properties from peer's XML query reply
*
* \param[in] xml XML node containing device properties
* \param[in,out] op Operation that query and reply relate to
* \param[in,out] peer Peer's device information
* \param[in] device ID of device being parsed
*/
static void
add_device_properties(const xmlNode *xml, remote_fencing_op_t *op,
peer_device_info_t *peer, const char *device)
{
xmlNode *child;
int verified = 0;
device_properties_t *props =
pcmk__assert_alloc(1, sizeof(device_properties_t));
int flags = st_device_supports_on; /* Old nodes that don't set the flag are assumed to support the 'on' action */
/* Add a new entry to this peer's devices list */
g_hash_table_insert(peer->devices, pcmk__str_copy(device), props);
/* Peers with verified (monitored) access will be preferred */
crm_element_value_int(xml, PCMK__XA_ST_MONITOR_VERIFIED, &verified);
if (verified) {
crm_trace("Peer %s has confirmed a verified device %s",
peer->host, device);
props->verified = TRUE;
}
crm_element_value_int(xml, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, &flags);
props->device_support_flags = flags;
/* Parse action-specific device properties */
parse_action_specific(xml, peer->host, device, op_requested_action(op),
op, st_phase_requested, props);
for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL;
child = pcmk__xe_next(child)) {
/* Replies for "reboot" operations will include the action-specific
* values for "off" and "on" in child elements, just in case the reboot
* winds up getting remapped.
*/
if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_OFF, pcmk__str_none)) {
parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF,
op, st_phase_off, props);
} else if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_ON,
pcmk__str_none)) {
parse_action_specific(child, peer->host, device, PCMK_ACTION_ON,
op, st_phase_on, props);
}
}
}
/*!
* \internal
* \brief Parse a peer's XML query reply and add it to operation's results
*
* \param[in,out] op Operation that query and reply relate to
* \param[in] host Name of peer that sent this reply
* \param[in] ndevices Number of devices expected in reply
* \param[in] xml XML node containing device list
*
* \return Newly allocated result structure with parsed reply
*/
static peer_device_info_t *
add_result(remote_fencing_op_t *op, const char *host, int ndevices,
const xmlNode *xml)
{
peer_device_info_t *peer = pcmk__assert_alloc(1,
sizeof(peer_device_info_t));
xmlNode *child;
peer->host = pcmk__str_copy(host);
peer->devices = pcmk__strkey_table(free, free);
/* Each child element describes one capable device available to the peer */
for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL;
child = pcmk__xe_next(child)) {
const char *device = pcmk__xe_id(child);
if (device) {
add_device_properties(child, op, peer, device);
}
}
peer->ndevices = g_hash_table_size(peer->devices);
CRM_CHECK(ndevices == peer->ndevices,
crm_err("Query claimed to have %d device%s but %d found",
ndevices, pcmk__plural_s(ndevices), peer->ndevices));
op->query_results = g_list_insert_sorted(op->query_results, peer, sort_peers);
return peer;
}
/*!
* \internal
* \brief Handle a peer's reply to our fencing query
*
* Parse a query result from XML and store it in the remote operation
* table, and when enough replies have been received, issue a fencing request.
*
* \param[in] msg XML reply received
*
* \return pcmk_ok on success, -errno on error
*
* \note See initiate_remote_stonith_op() for how the XML query was initially
* formed, and stonith_query() for how the peer formed its XML reply.
*/
int
process_remote_stonith_query(xmlNode *msg)
{
int ndevices = 0;
gboolean host_is_target = FALSE;
gboolean have_all_replies = FALSE;
const char *id = NULL;
const char *host = NULL;
remote_fencing_op_t *op = NULL;
peer_device_info_t *peer = NULL;
uint32_t replies_expected;
xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR);
CRM_CHECK(dev != NULL, return -EPROTO);
id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP);
CRM_CHECK(id != NULL, return -EPROTO);
dev = get_xpath_object("//@" PCMK__XA_ST_AVAILABLE_DEVICES, msg, LOG_ERR);
CRM_CHECK(dev != NULL, return -EPROTO);
crm_element_value_int(dev, PCMK__XA_ST_AVAILABLE_DEVICES, &ndevices);
op = g_hash_table_lookup(stonith_remote_op_list, id);
if (op == NULL) {
crm_debug("Received query reply for unknown or expired operation %s",
id);
return -EOPNOTSUPP;
}
replies_expected = fencing_active_peers();
if (op->replies_expected < replies_expected) {
replies_expected = op->replies_expected;
}
if ((++op->replies >= replies_expected) && (op->state == st_query)) {
have_all_replies = TRUE;
}
host = crm_element_value(msg, PCMK__XA_SRC);
host_is_target = pcmk__str_eq(host, op->target, pcmk__str_casei);
crm_info("Query result %d of %d from %s for %s/%s (%d device%s) %s",
op->replies, replies_expected, host,
op->target, op->action, ndevices, pcmk__plural_s(ndevices), id);
if (ndevices > 0) {
peer = add_result(op, host, ndevices, dev);
}
pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
if (pcmk_is_set(op->call_options, st_opt_topology)) {
/* If we start the fencing before all the topology results are in,
* it is possible fencing levels will be skipped because of the missing
* query results. */
if (op->state == st_query && all_topology_devices_found(op)) {
/* All the query results are in for the topology, start the fencing ops. */
crm_trace("All topology devices found");
request_peer_fencing(op, peer);
} else if (have_all_replies) {
crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ",
replies_expected, op->replies);
request_peer_fencing(op, NULL);
}
} else if (op->state == st_query) {
int nverified = count_peer_devices(op, peer, TRUE,
fenced_support_flag(op->action));
/* We have a result for a non-topology fencing op that looks promising,
* so go ahead and start fencing before the query timeout */
if ((peer != NULL) && !host_is_target && nverified) {
/* we have a verified device living on a peer that is not the target */
crm_trace("Found %d verified device%s",
nverified, pcmk__plural_s(nverified));
request_peer_fencing(op, peer);
} else if (have_all_replies) {
crm_info("All query replies have arrived, continuing (%d expected/%d received) ",
replies_expected, op->replies);
request_peer_fencing(op, NULL);
} else {
crm_trace("Waiting for more peer results before launching fencing operation");
}
} else if ((peer != NULL) && (op->state == st_done)) {
crm_info("Discarding query result from %s (%d device%s): "
"Operation is %s", peer->host,
peer->ndevices, pcmk__plural_s(peer->ndevices),
stonith_op_state_str(op->state));
}
return pcmk_ok;
}
/*!
* \internal
* \brief Handle a peer's reply to a fencing request
*
* Parse a fencing reply from XML, and either finalize the operation
* or attempt another device as appropriate.
*
* \param[in] msg XML reply received
*/
void
fenced_process_fencing_reply(xmlNode *msg)
{
const char *id = NULL;
const char *device = NULL;
remote_fencing_op_t *op = NULL;
xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR);
pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
CRM_CHECK(dev != NULL, return);
id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP);
CRM_CHECK(id != NULL, return);
dev = stonith__find_xe_with_result(msg);
CRM_CHECK(dev != NULL, return);
stonith__xe_get_result(dev, &result);
device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
if (stonith_remote_op_list) {
op = g_hash_table_lookup(stonith_remote_op_list, id);
}
if ((op == NULL) && pcmk__result_ok(&result)) {
/* Record successful fencing operations */
const char *client_id = crm_element_value(dev, PCMK__XA_ST_CLIENTID);
op = create_remote_stonith_op(client_id, dev, TRUE);
}
if (op == NULL) {
/* Could be for an event that began before we started */
/* TODO: Record the op for later querying */
crm_info("Received peer result of unknown or expired operation %s", id);
pcmk__reset_result(&result);
return;
}
pcmk__reset_result(&op->result);
op->result = result; // The operation takes ownership of the result
if (op->devices && device && !pcmk__str_eq(op->devices->data, device, pcmk__str_casei)) {
crm_err("Received outdated reply for device %s (instead of %s) to "
"fence (%s) %s. Operation already timed out at peer level.",
device, (const char *) op->devices->data, op->action, op->target);
return;
}
if (pcmk__str_eq(crm_element_value(msg, PCMK__XA_SUBT),
PCMK__VALUE_BROADCAST, pcmk__str_none)) {
if (pcmk__result_ok(&op->result)) {
op->state = st_done;
} else {
op->state = st_failed;
}
finalize_op(op, msg, false);
return;
} else if (!pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) {
/* If this isn't a remote level broadcast, and we are not the
* originator of the operation, we should not be receiving this msg. */
crm_err("Received non-broadcast fencing result for operation %.8s "
"we do not own (device %s targeting %s)",
op->id, device, op->target);
return;
}
if (pcmk_is_set(op->call_options, st_opt_topology)) {
const char *device = NULL;
const char *reason = op->result.exit_reason;
/* We own the op, and it is complete. Broadcast the result to all nodes
* and notify our local clients. */
if (op->state == st_done) {
finalize_op(op, msg, false);
return;
}
device = crm_element_value(msg, PCMK__XA_ST_DEVICE_ID);
if ((op->phase == 2) && !pcmk__result_ok(&op->result)) {
/* A remapped "on" failed, but the node was already turned off
* successfully, so ignore the error and continue.
*/
crm_warn("Ignoring %s 'on' failure (%s%s%s) targeting %s "
"after successful 'off'",
device, pcmk_exec_status_str(op->result.execution_status),
(reason == NULL)? "" : ": ",
(reason == NULL)? "" : reason,
op->target);
pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
} else {
crm_notice("Action '%s' targeting %s%s%s on behalf of %s@%s: "
"%s%s%s%s",
op->action, op->target,
((device == NULL)? "" : " using "),
((device == NULL)? "" : device),
op->client_name,
op->originator,
pcmk_exec_status_str(op->result.execution_status),
(reason == NULL)? "" : " (",
(reason == NULL)? "" : reason,
(reason == NULL)? "" : ")");
}
if (pcmk__result_ok(&op->result)) {
/* An operation completed successfully. Try another device if
* necessary, otherwise mark the operation as done. */
advance_topology_device_in_level(op, device, msg);
return;
} else {
/* This device failed, time to try another topology level. If no other
* levels are available, mark this operation as failed and report results. */
if (advance_topology_level(op, false) != pcmk_rc_ok) {
op->state = st_failed;
finalize_op(op, msg, false);
return;
}
}
} else if (pcmk__result_ok(&op->result) && (op->devices == NULL)) {
op->state = st_done;
finalize_op(op, msg, false);
return;
} else if ((op->result.execution_status == PCMK_EXEC_TIMEOUT)
&& (op->devices == NULL)) {
/* If the operation timed out, don't bother retrying other peers. */
op->state = st_failed;
finalize_op(op, msg, false);
return;
} else {
/* fall through and attempt the fencing action using another peer */
}
/* Retry on failure */
crm_trace("Next for %s on behalf of %s@%s (result was: %s)",
op->target, op->originator, op->client_name,
pcmk_exec_status_str(op->result.execution_status));
request_peer_fencing(op, NULL);
}
gboolean
stonith_check_fence_tolerance(int tolerance, const char *target, const char *action)
{
GHashTableIter iter;
time_t now = time(NULL);
remote_fencing_op_t *rop = NULL;
if (tolerance <= 0 || !stonith_remote_op_list || target == NULL ||
action == NULL) {
return FALSE;
}
g_hash_table_iter_init(&iter, stonith_remote_op_list);
while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) {
if (strcmp(rop->target, target) != 0) {
continue;
} else if (rop->state != st_done) {
continue;
/* We don't have to worry about remapped reboots here
* because if state is done, any remapping has been undone
*/
} else if (strcmp(rop->action, action) != 0) {
continue;
} else if ((rop->completed + tolerance) < now) {
continue;
}
crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s",
target, action, tolerance, rop->delegate, rop->originator);
return TRUE;
}
return FALSE;
}
diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h
index 2d8047cd68..50c61582df 100644
--- a/daemons/fenced/pacemaker-fenced.h
+++ b/daemons/fenced/pacemaker-fenced.h
@@ -1,330 +1,330 @@
/*
* Copyright 2009-2024 the Pacemaker project contributors
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <stdint.h> // uint32_t, uint64_t
#include <libxml/tree.h> // xmlNode
#include <crm/common/mainloop.h>
#include <crm/cluster.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
/*!
* \internal
* \brief Check whether target has already been fenced recently
*
* \param[in] tolerance Number of seconds to look back in time
* \param[in] target Name of node to search for
* \param[in] action Action we want to match
*
* \return TRUE if an equivalent fencing operation took place in the last
* \p tolerance seconds, FALSE otherwise
*/
gboolean stonith_check_fence_tolerance(int tolerance, const char *target, const char *action);
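/* As an illustrative sketch only (the caller shown here is hypothetical, not a
 * prescribed contract): a request handler can use this to short-circuit a
 * duplicate fencing request, e.g.
 *
 *     if (stonith_check_fence_tolerance(tolerance, target, action)) {
 *         // An equivalent fencing of target completed within the tolerance
 *         // window, so treat this request as already satisfied.
 *     }
 */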
typedef struct stonith_device_s {
char *id;
char *agent;
char *namespace;
/*! list of actions that must execute on the target node. Used for unfencing */
GString *on_target_actions;
GList *targets;
time_t targets_age;
gboolean has_attr_map;
// Whether target's nodeid should be passed as a parameter to the agent
gboolean include_nodeid;
/* whether the cluster should automatically unfence nodes with the device */
gboolean automatic_unfencing;
guint priority;
uint32_t flags; // Group of enum st_device_flags
GHashTable *params;
GHashTable *aliases;
GList *pending_ops;
mainloop_timer_t *timer;
crm_trigger_t *work;
xmlNode *agent_metadata;
/*! A verified device is one for which the agent has been contacted
* successfully to perform a monitor operation */
gboolean verified;
gboolean cib_registered;
gboolean api_registered;
gboolean dirty;
} stonith_device_t;
/* These values are used to index certain arrays by "phase". Usually an
* operation has only one "phase", so phase is always zero. However, some
* reboots are remapped to "off" then "on", in which case "reboot" will be
* phase 0, "off" will be phase 1 and "on" will be phase 2.
*/
enum st_remap_phase {
st_phase_requested = 0,
st_phase_off = 1,
st_phase_on = 2,
st_phase_max = 3
};
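/* Illustrative sketch: the fencer keeps per-phase device data in arrays indexed
 * by these values (for example, device_properties_t's disallowed[] in the
 * query-parsing code). The assignments below only demonstrate that indexing:
 *
 *     props->disallowed[st_phase_requested] = TRUE;  // original "reboot" request
 *     props->disallowed[st_phase_off] = TRUE;        // remapped "off"
 *     props->disallowed[st_phase_on] = TRUE;         // remapped "on"
 */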
typedef struct remote_fencing_op_s {
/* The unique id associated with this operation */
char *id;
/*! The node this operation will fence */
char *target;
/*! The fencing action to perform on the target. (reboot, on, off) */
char *action;
/*! When was the fencing action recorded (seconds since epoch) */
time_t created;
/*! Marks if the final notifications have been sent to local stonith clients. */
gboolean notify_sent;
/*! The number of query replies received */
guint replies;
/*! The number of query replies expected */
guint replies_expected;
/*! Does this node own control of this operation */
gboolean owner;
/*! After the query is complete, this is the high-level timer that expires the entire operation */
guint op_timer_total;
/*! This timer expires the current fencing request. Many fencing
* requests may exist in a single operation */
guint op_timer_one;
/*! This timer expires the query request sent out to determine
* which nodes contain which devices, and which nodes those devices can fence */
guint query_timer;
/*! This is the default timeout to use for each fencing device if no
* custom timeout is received in the query. */
gint base_timeout;
/*! This is the calculated total timeout an operation can take before
* expiring. This is calculated by adding together all the timeout
* values associated with the devices this fencing operation may call */
gint total_timeout;
/*!
* Fencing delay (in seconds) requested by API client (used by controller to
* implement \c PCMK_OPT_PRIORITY_FENCING_DELAY). A value of -1 means
* disable all configured delays.
*/
int client_delay;
/*! Delegate is the node being asked to perform a fencing action
* on behalf of the node that owns the remote operation. Some operations
* will involve multiple delegates. This value represents the final delegate
* that is used. */
char *delegate;
/*! The point at which the remote operation completed */
time_t completed;
//! Group of enum stonith_call_options associated with this operation
uint32_t call_options;
/*! The current state of the remote operation. This indicates
* what stage the op is in: query, exec, done, duplicate, or failed. */
enum op_state state;
/*! The node that owns the remote operation */
char *originator;
/*! The local client id that initiated the fencing request */
char *client_id;
/*! The client's call_id that initiated the fencing request */
int client_callid;
/*! The name of client that initiated the fencing request */
char *client_name;
/*! List of the received query results for all the nodes in the cpg group */
GList *query_results;
/*! The original request that initiated the remote stonith operation */
xmlNode *request;
/*! The current topology level being executed */
guint level;
/*! The current operation phase being executed */
enum st_remap_phase phase;
/*! Devices with automatic unfencing (always run if "on" requested, never if remapped) */
GList *automatic_list;
/*! List of all devices at the currently executing topology level */
GList *devices_list;
/*! Current entry in the topology device list */
GList *devices;
/*! List of duplicate operations attached to this operation. Once this operation
* completes, the duplicate operations will be closed out as well. */
GList *duplicates;
/*! The point at which the remote operation completed (in nanoseconds) */
long long completed_nsec;
/*! The (potentially intermediate) result of the operation */
pcmk__action_result_t result;
} remote_fencing_op_t;
void fenced_broadcast_op_result(const remote_fencing_op_t *op, bool op_merged);
// Fencer-specific client flags
enum st_client_flags {
st_callback_unknown = UINT64_C(0),
st_callback_notify_fence = (UINT64_C(1) << 0),
st_callback_device_add = (UINT64_C(1) << 2),
st_callback_device_del = (UINT64_C(1) << 4),
st_callback_notify_history = (UINT64_C(1) << 5),
st_callback_notify_history_synced = (UINT64_C(1) << 6)
};
// How the user specified the target of a topology level
enum fenced_target_by {
fenced_target_by_unknown = -1, // Invalid or not yet parsed
fenced_target_by_name, // By target name
fenced_target_by_pattern, // By a pattern matching target names
fenced_target_by_attribute, // By a node attribute/value on target
};
/*
* Complex fencing requirements are specified via fencing topologies.
* A topology consists of levels; each level is a list of fencing devices.
* Topologies are stored in a hash table by node name. When a node needs to be
* fenced, if it has an entry in the topology table, the levels are tried
* sequentially, and the devices in each level are tried sequentially.
* Fencing is considered successful as soon as any level succeeds;
* a level is considered successful if all its devices succeed.
* Essentially, all devices at a given level are "and-ed" and the
* levels are "or-ed".
*
* This structure is used for the topology table entries.
* Topology levels start from 1, so levels[0] is unused and always NULL.
*/
typedef struct stonith_topology_s {
enum fenced_target_by kind; // How target was specified
/*! Node name regex or attribute name=value for which topology applies */
char *target;
char *target_value;
char *target_pattern;
char *target_attribute;
/*! Names of fencing devices at each topology level */
- GList *levels[ST_LEVEL_MAX];
+ GList *levels[ST__LEVEL_COUNT];
} stonith_topology_t;
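/* For illustration, with hypothetical device names: a topology for node1 with
 * level 1 = { psu1, psu2 } and level 2 = { ipmi } means fencing node1 succeeds
 * if both psu1 and psu2 succeed (devices within a level are "and-ed"), or,
 * failing that, if ipmi alone succeeds (levels are "or-ed"). In the structure
 * above, that would be stored as levels[1] = { "psu1", "psu2" } and
 * levels[2] = { "ipmi" }, with levels[0] unused.
 */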
void stonith_shutdown(int nsig);
void init_device_list(void);
void free_device_list(void);
void init_topology_list(void);
void free_topology_list(void);
void free_stonith_remote_op_list(void);
void init_stonith_remote_op_hash_table(GHashTable **table);
void free_metadata_cache(void);
void fenced_unregister_handlers(void);
uint64_t get_stonith_flag(const char *name);
void stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags,
xmlNode *op_request, const char *remote_peer);
int stonith_device_register(xmlNode *msg, gboolean from_cib);
void stonith_device_remove(const char *id, bool from_cib);
char *stonith_level_key(const xmlNode *msg, enum fenced_target_by);
void fenced_register_level(xmlNode *msg, char **desc,
pcmk__action_result_t *result);
void fenced_unregister_level(xmlNode *msg, char **desc,
pcmk__action_result_t *result);
stonith_topology_t *find_topology_for_host(const char *host);
void do_local_reply(const xmlNode *notify_src, pcmk__client_t *client,
int call_options);
xmlNode *fenced_construct_reply(const xmlNode *request, xmlNode *data,
const pcmk__action_result_t *result);
void
do_stonith_async_timeout_update(const char *client, const char *call_id, int timeout);
void fenced_send_notification(const char *type,
const pcmk__action_result_t *result,
xmlNode *data);
void fenced_send_config_notification(const char *op,
const pcmk__action_result_t *result,
const char *desc);
remote_fencing_op_t *initiate_remote_stonith_op(const pcmk__client_t *client,
xmlNode *request,
gboolean manual_ack);
void fenced_process_fencing_reply(xmlNode *msg);
int process_remote_stonith_query(xmlNode * msg);
void *create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer);
void stonith_fence_history(xmlNode *msg, xmlNode **output,
const char *remote_peer, int options);
void stonith_fence_history_trim(void);
bool fencing_peer_active(crm_node_t *peer);
void set_fencing_completed(remote_fencing_op_t * op);
int fenced_handle_manual_confirmation(const pcmk__client_t *client,
xmlNode *msg);
const char *fenced_device_reboot_action(const char *device_id);
bool fenced_device_supports_on(const char *device_id);
gboolean node_has_attr(const char *node, const char *name, const char *value);
gboolean node_does_watchdog_fencing(const char *node);
void fencing_topology_init(void);
void setup_cib(void);
void fenced_cib_cleanup(void);
int fenced_scheduler_init(void);
void fenced_scheduler_cleanup(void);
void fenced_scheduler_run(xmlNode *cib);
static inline void
fenced_set_protocol_error(pcmk__action_result_t *result)
{
pcmk__set_result(result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID,
"Fencer API request missing required information (bug?)");
}
/*!
* \internal
* \brief Get the device flag to use with a given action when searching devices
*
* \param[in] action Action to check
*
* \return st_device_supports_on if \p action is "on", otherwise
* st_device_supports_none
*/
static inline uint32_t
fenced_support_flag(const char *action)
{
if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
return st_device_supports_on;
}
return st_device_supports_none;
}
extern char *stonith_our_uname;
extern gboolean stand_alone;
extern GHashTable *device_list;
extern GHashTable *topology;
extern long long stonith_watchdog_timeout_ms;
extern GList *stonith_watchdog_targets;
extern GHashTable *stonith_remote_op_list;
extern crm_exit_t exit_code;
extern gboolean stonith_shutdown_flag;
diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h
index 414086dc4b..55ba89107d 100644
--- a/include/crm/fencing/internal.h
+++ b/include/crm/fencing/internal.h
@@ -1,166 +1,170 @@
/*
* Copyright 2011-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef STONITH_NG_INTERNAL__H
# define STONITH_NG_INTERNAL__H
# include <glib.h>
# include <crm/common/ipc.h>
# include <crm/common/xml.h>
# include <crm/common/output_internal.h>
# include <crm/stonith-ng.h>
enum st_device_flags {
st_device_supports_none = (0 << 0),
st_device_supports_list = (1 << 0),
st_device_supports_status = (1 << 1),
st_device_supports_reboot = (1 << 2),
st_device_supports_parameter_plug = (1 << 3),
st_device_supports_parameter_port = (1 << 4),
st_device_supports_on = (1 << 5),
};
#define stonith__set_device_flags(device_flags, device_id, flags_to_set) do { \
device_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Fence device", device_id, \
(device_flags), (flags_to_set), \
#flags_to_set); \
} while (0)
#define stonith__set_call_options(st_call_opts, call_for, flags_to_set) do { \
st_call_opts = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Fencer call", (call_for), \
(st_call_opts), (flags_to_set), \
#flags_to_set); \
} while (0)
#define stonith__clear_call_options(st_call_opts, call_for, flags_to_clear) do { \
st_call_opts = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Fencer call", (call_for), \
(st_call_opts), (flags_to_clear), \
#flags_to_clear); \
} while (0)
struct stonith_action_s;
typedef struct stonith_action_s stonith_action_t;
stonith_action_t *stonith__action_create(const char *agent,
const char *action_name,
const char *target,
uint32_t target_nodeid,
int timeout_sec,
GHashTable *device_args,
GHashTable *port_map,
const char *host_arg);
void stonith__destroy_action(stonith_action_t *action);
pcmk__action_result_t *stonith__action_result(stonith_action_t *action);
int stonith__result2rc(const pcmk__action_result_t *result);
void stonith__xe_set_result(xmlNode *xml, const pcmk__action_result_t *result);
void stonith__xe_get_result(const xmlNode *xml, pcmk__action_result_t *result);
xmlNode *stonith__find_xe_with_result(xmlNode *xml);
int stonith__execute_async(stonith_action_t *action, void *userdata,
void (*done) (int pid,
const pcmk__action_result_t *result,
void *user_data),
void (*fork_cb) (int pid, void *user_data));
int stonith__metadata_async(const char *agent, int timeout_sec,
void (*callback)(int pid,
const pcmk__action_result_t *result,
void *user_data),
void *user_data);
xmlNode *create_level_registration_xml(const char *node, const char *pattern,
const char *attr, const char *value,
int level,
const stonith_key_value_t *device_list);
xmlNode *create_device_registration_xml(const char *id,
enum stonith_namespace namespace,
const char *agent,
const stonith_key_value_t *params,
const char *rsc_provides);
void stonith__register_messages(pcmk__output_t *out);
GList *stonith__parse_targets(const char *hosts);
const char *stonith__later_succeeded(const stonith_history_t *event,
const stonith_history_t *top_history);
stonith_history_t *stonith__sort_history(stonith_history_t *history);
void stonith__device_parameter_flags(uint32_t *device_flags,
const char *device_name,
xmlNode *metadata);
-# define ST_LEVEL_MAX 10
+/* Only 1-9 are valid fencing topology levels; however,
+ * 0 is used in unregister requests to mean
+ * "unregister all levels".
+ */
+# define ST__LEVEL_COUNT 10
// @COMPAT Deprecated since 1.1.17 (and see T773 to drop it)
# define STONITH_ATTR_ACTION_OP "action"
# define STONITH_OP_EXEC "st_execute"
# define STONITH_OP_TIMEOUT_UPDATE "st_timeout_update"
# define STONITH_OP_QUERY "st_query"
# define STONITH_OP_FENCE "st_fence"
# define STONITH_OP_RELAY "st_relay"
# define STONITH_OP_DEVICE_ADD "st_device_register"
# define STONITH_OP_DEVICE_DEL "st_device_remove"
# define STONITH_OP_FENCE_HISTORY "st_fence_history"
# define STONITH_OP_LEVEL_ADD "st_level_add"
# define STONITH_OP_LEVEL_DEL "st_level_remove"
# define STONITH_OP_NOTIFY "st_notify"
# define STONITH_OP_POKE "poke"
# define STONITH_WATCHDOG_AGENT "fence_watchdog"
/* Don't change 2 below as it would break rolling upgrade */
# define STONITH_WATCHDOG_AGENT_INTERNAL "#watchdog"
# define STONITH_WATCHDOG_ID "watchdog"
stonith_history_t *stonith__first_matching_event(stonith_history_t *history,
bool (*matching_fn)(stonith_history_t *, void *),
void *user_data);
bool stonith__event_state_pending(stonith_history_t *history, void *user_data);
bool stonith__event_state_eq(stonith_history_t *history, void *user_data);
bool stonith__event_state_neq(stonith_history_t *history, void *user_data);
int stonith__legacy2status(int rc);
int stonith__exit_status(const stonith_callback_data_t *data);
int stonith__execution_status(const stonith_callback_data_t *data);
const char *stonith__exit_reason(const stonith_callback_data_t *data);
int stonith__event_exit_status(const stonith_event_t *event);
int stonith__event_execution_status(const stonith_event_t *event);
const char *stonith__event_exit_reason(const stonith_event_t *event);
char *stonith__event_description(const stonith_event_t *event);
gchar *stonith__history_description(const stonith_history_t *event,
bool full_history,
const char *later_succeeded,
uint32_t show_opts);
/*!
* \internal
* \brief Is a fencing operation in pending state?
*
* \param[in] state State as enum op_state value
*
* \return true if the operation is still pending (neither \c st_failed nor
*         \c st_done), otherwise false
*/
static inline bool
stonith__op_state_pending(enum op_state state)
{
return state != st_failed && state != st_done;
}
gboolean stonith__watchdog_fencing_enabled_for_node(const char *node);
gboolean stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node);
#endif
diff --git a/include/crm_internal.h b/include/crm_internal.h
index 590f88cc7c..358aad6bf9 100644
--- a/include/crm_internal.h
+++ b/include/crm_internal.h
@@ -1,87 +1,94 @@
/*
* Copyright 2006-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM_INTERNAL__H
# define CRM_INTERNAL__H
# ifndef PCMK__CONFIG_H
# define PCMK__CONFIG_H
# include <config.h>
# endif
# include <portability.h>
/* Our minimum glib dependency is 2.42. Define that as both the minimum and
* maximum glib APIs that are allowed (i.e. APIs that were already deprecated
* in 2.42, and APIs introduced after 2.42, cannot be used by Pacemaker code).
*/
#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_42
#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_42
# include <glib.h>
# include <stdbool.h>
# include <libxml/tree.h>
/* Public API headers can guard including deprecated API headers with this
* symbol, thus preventing internal code (which includes this header) from using
* deprecated APIs, while still allowing external code to use them by default.
*/
#define PCMK_ALLOW_DEPRECATED 0
# include <crm/lrmd.h>
# include <crm/cluster/internal.h>
# include <crm/common/logging.h>
# include <crm/common/logging_internal.h>
# include <crm/common/ipc_internal.h>
# include <crm/common/options_internal.h>
# include <crm/common/output_internal.h>
# include <crm/common/scheduler_internal.h>
# include <crm/common/schemas_internal.h>
# include <crm/common/xml_internal.h>
# include <crm/common/xml_io_internal.h>
# include <crm/common/xml_names_internal.h>
# include <crm/common/internal.h>
# include <locale.h>
# include <gettext.h>
#define N_(String) (String)
#ifdef ENABLE_NLS
# define _(String) gettext(String)
#else
# define _(String) (String)
#endif
/*
* IPC service names that are only used internally
*/
# define PCMK__SERVER_BASED_RO "cib_ro"
# define PCMK__SERVER_BASED_RW "cib_rw"
# define PCMK__SERVER_BASED_SHM "cib_shm"
/*
* IPC commands that can be sent to Pacemaker daemons
*/
#define PCMK__ATTRD_CMD_PEER_REMOVE "peer-remove"
#define PCMK__ATTRD_CMD_UPDATE "update"
#define PCMK__ATTRD_CMD_UPDATE_BOTH "update-both"
#define PCMK__ATTRD_CMD_UPDATE_DELAY "update-delay"
#define PCMK__ATTRD_CMD_QUERY "query"
#define PCMK__ATTRD_CMD_REFRESH "refresh"
#define PCMK__ATTRD_CMD_FLUSH "flush"
#define PCMK__ATTRD_CMD_SYNC_RESPONSE "sync-response"
#define PCMK__ATTRD_CMD_CLEAR_FAILURE "clear-failure"
#define PCMK__ATTRD_CMD_CONFIRM "confirm"
#define PCMK__CONTROLD_CMD_NODES "list-nodes"
-#endif /* CRM_INTERNAL__H */
+#define ST__LEVEL_MIN 1
+#define ST__LEVEL_MAX 9
+
+#ifdef __cplusplus
+
+#endif
+
+#endif // CRM_INTERNAL__H
diff --git a/lib/pengine/pe_status_private.h b/lib/pengine/pe_status_private.h
index 309f0b7629..9cdd2ec115 100644
--- a/lib/pengine/pe_status_private.h
+++ b/lib/pengine/pe_status_private.h
@@ -1,150 +1,154 @@
/*
* Copyright 2018-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_STATUS_PRIVATE__H
# define PE_STATUS_PRIVATE__H
/* This header is for the sole use of libpe_status, so that functions can be
* declared with G_GNUC_INTERNAL for efficiency.
*/
#if defined(PCMK__UNIT_TESTING)
#undef G_GNUC_INTERNAL
#define G_GNUC_INTERNAL
#endif
#include <glib.h> // GSList, GList, GHashTable
#include <libxml/tree.h> // xmlNode
#include <crm/pengine/status.h> // pcmk_action_t, pcmk_resource_t, etc.
/*!
* \internal
* \deprecated This macro will be removed in a future release
*/
# define status_print(fmt, args...) \
if(options & pe_print_html) { \
FILE *stream = print_data; \
fprintf(stream, fmt, ##args); \
} else if(options & pe_print_printf || options & pe_print_ncurses) { \
FILE *stream = print_data; \
fprintf(stream, fmt, ##args); \
} else if(options & pe_print_xml) { \
FILE *stream = print_data; \
fprintf(stream, fmt, ##args); \
} else if(options & pe_print_log) { \
int log_level = *(int*)print_data; \
do_crm_log(log_level, fmt, ##args); \
}
typedef struct notify_data_s {
GSList *keys; // Environment variable name/value pairs
const char *action;
pcmk_action_t *pre;
pcmk_action_t *post;
pcmk_action_t *pre_done;
pcmk_action_t *post_done;
GList *active; /* notify_entry_t* */
GList *inactive; /* notify_entry_t* */
GList *start; /* notify_entry_t* */
GList *stop; /* notify_entry_t* */
GList *demote; /* notify_entry_t* */
GList *promote; /* notify_entry_t* */
GList *promoted; /* notify_entry_t* */
GList *unpromoted; /* notify_entry_t* */
GHashTable *allowed_nodes;
} notify_data_t;
G_GNUC_INTERNAL
pcmk_resource_t *pe__create_clone_child(pcmk_resource_t *rsc,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pe__create_action_notifications(pcmk_resource_t *rsc,
notify_data_t *n_data);
G_GNUC_INTERNAL
void pe__free_action_notification_data(notify_data_t *n_data);
G_GNUC_INTERNAL
notify_data_t *pe__action_notif_pseudo_ops(pcmk_resource_t *rsc,
const char *task,
pcmk_action_t *action,
pcmk_action_t *complete);
G_GNUC_INTERNAL
void pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gint pe__cmp_rsc_priority(gconstpointer a, gconstpointer b);
G_GNUC_INTERNAL
gboolean pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
pcmk_resource_t *parent,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_remote_nodes(xmlNode *xml_resources,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_resources(const xmlNode *xml_resources,
pcmk_scheduler_t *scheduler);
+G_GNUC_INTERNAL
+void pcmk__unpack_fencing_topology(const xmlNode *xml_fencing_topology,
+ pcmk_scheduler_t *scheduler);
+
G_GNUC_INTERNAL
gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
pcmk__op_digest_t *pe__compare_fencing_digest(pcmk_resource_t *rsc,
const char *agent,
pcmk_node_t *node,
pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
void pe__unpack_node_health_scores(pcmk_scheduler_t *scheduler);
// Primitive resource methods
G_GNUC_INTERNAL
unsigned int pe__primitive_max_per_node(const pcmk_resource_t *rsc);
// Group resource methods
G_GNUC_INTERNAL
unsigned int pe__group_max_per_node(const pcmk_resource_t *rsc);
// Clone resource methods
G_GNUC_INTERNAL
unsigned int pe__clone_max_per_node(const pcmk_resource_t *rsc);
// Bundle resource methods
G_GNUC_INTERNAL
pcmk_node_t *pe__bundle_active_node(const pcmk_resource_t *rsc,
unsigned int *count_all,
unsigned int *count_clean);
G_GNUC_INTERNAL
unsigned int pe__bundle_max_per_node(const pcmk_resource_t *rsc);
#endif // PE_STATUS_PRIVATE__H
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index 36209da734..1a1390d95c 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,519 +1,523 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/common/xml.h>
#include <crm/common/cib_internal.h>
#include <glib.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
/*!
* \brief Create a new object to hold scheduler data
*
* \return New, initialized scheduler data on success, else NULL (and set errno)
* \note Only pcmk_scheduler_t objects created with this function (as opposed
* to statically declared or directly allocated) should be used with the
* functions in this library, to allow for future extensions to the
* data type. The caller is responsible for freeing the memory with
* pe_free_working_set() when the instance is no longer needed.
*/
pcmk_scheduler_t *
pe_new_working_set(void)
{
pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
if (scheduler != NULL) {
set_working_set_defaults(scheduler);
}
return scheduler;
}
/*!
* \brief Free scheduler data
*
* \param[in,out] scheduler Scheduler data to free
*/
void
pe_free_working_set(pcmk_scheduler_t *scheduler)
{
if (scheduler != NULL) {
pe_reset_working_set(scheduler);
scheduler->priv = NULL;
free(scheduler);
}
}
#define XPATH_DEPRECATED_RULES \
"//" PCMK_XE_OP_DEFAULTS "//" PCMK_XE_EXPRESSION \
"|//" PCMK_XE_OP "//" PCMK_XE_EXPRESSION
/*!
* \internal
* \brief Log a warning for deprecated rule syntax in operations
*
* \param[in] scheduler Scheduler data
*/
static void
check_for_deprecated_rules(pcmk_scheduler_t *scheduler)
{
// @COMPAT Drop this function when support for the syntax is dropped
xmlNode *deprecated = get_xpath_object(XPATH_DEPRECATED_RULES,
scheduler->input, LOG_NEVER);
if (deprecated != NULL) {
pcmk__warn_once(pcmk__wo_op_attr_expr,
"Support for rules with node attribute expressions in "
PCMK_XE_OP " or " PCMK_XE_OP_DEFAULTS " is deprecated "
"and will be dropped in a future release");
}
}
/*
* Unpack everything
* At the end you'll have:
* - A list of nodes
* - A list of resources (each with any dependencies on other resources)
* - A list of constraints between resources and nodes
* - A list of constraints between start/stop actions
* - A list of nodes that need to be stonith'd
* - A list of nodes that need to be shutdown
* - A list of the possible stop/start actions (without dependencies)
*/
gboolean
cluster_status(pcmk_scheduler_t * scheduler)
{
const char *new_version = NULL;
xmlNode *section = NULL;
if ((scheduler == NULL) || (scheduler->input == NULL)) {
return FALSE;
}
new_version = crm_element_value(scheduler->input, PCMK_XA_CRM_FEATURE_SET);
if (pcmk__check_feature_set(new_version) != pcmk_rc_ok) {
pcmk__config_err("Can't process CIB with feature set '%s' greater than our own '%s'",
new_version, CRM_FEATURE_SET);
return FALSE;
}
crm_trace("Beginning unpack");
if (scheduler->failed != NULL) {
free_xml(scheduler->failed);
}
scheduler->failed = pcmk__xe_create(NULL, "failed-ops");
if (scheduler->now == NULL) {
scheduler->now = crm_time_new(NULL);
}
if (scheduler->dc_uuid == NULL) {
scheduler->dc_uuid = crm_element_value_copy(scheduler->input,
PCMK_XA_DC_UUID);
}
if (pcmk__xe_attr_is_true(scheduler->input, PCMK_XA_HAVE_QUORUM)) {
pcmk__set_scheduler_flags(scheduler, pcmk_sched_quorate);
} else {
pcmk__clear_scheduler_flags(scheduler, pcmk_sched_quorate);
}
scheduler->op_defaults = get_xpath_object("//" PCMK_XE_OP_DEFAULTS,
scheduler->input, LOG_NEVER);
check_for_deprecated_rules(scheduler);
scheduler->rsc_defaults = get_xpath_object("//" PCMK_XE_RSC_DEFAULTS,
scheduler->input, LOG_NEVER);
section = get_xpath_object("//" PCMK_XE_CRM_CONFIG, scheduler->input,
LOG_TRACE);
unpack_config(section, scheduler);
if (!pcmk_any_flags_set(scheduler->flags,
pcmk_sched_location_only|pcmk_sched_quorate)
&& (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
pcmk__sched_warn("Fencing and resource management disabled "
"due to lack of quorum");
}
section = get_xpath_object("//" PCMK_XE_NODES, scheduler->input, LOG_TRACE);
unpack_nodes(section, scheduler);
section = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input,
LOG_TRACE);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
unpack_remote_nodes(section, scheduler);
}
unpack_resources(section, scheduler);
+ section = get_xpath_object("//" PCMK_XE_FENCING_TOPOLOGY, scheduler->input,
+ LOG_TRACE);
+ pcmk__unpack_fencing_topology(section, scheduler);
+
section = get_xpath_object("//" PCMK_XE_TAGS, scheduler->input, LOG_NEVER);
unpack_tags(section, scheduler);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
section = get_xpath_object("//" PCMK_XE_STATUS, scheduler->input,
LOG_TRACE);
unpack_status(section, scheduler);
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_no_counts)) {
for (GList *item = scheduler->resources; item != NULL;
item = item->next) {
((pcmk_resource_t *) (item->data))->fns->count(item->data);
}
crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
scheduler->ninstances, scheduler->disabled_resources,
scheduler->blocked_resources);
}
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_status);
return TRUE;
}
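/* Illustrative usage sketch (hypothetical caller; cib_xml stands in for
 * caller-supplied CIB XML):
 *
 *     pcmk_scheduler_t *scheduler = pe_new_working_set();
 *
 *     scheduler->input = cib_xml;
 *     if (cluster_status(scheduler)) {
 *         // scheduler->nodes, scheduler->resources, etc. are now populated
 *     }
 *     pe_free_working_set(scheduler);
 */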
/*!
* \internal
* \brief Free a list of pcmk_resource_t
*
* \param[in,out] resources List to free
*
* \note When the scheduler's resource list is freed, that includes the original
* storage for the uname and id of any Pacemaker Remote nodes in the
* scheduler's node list, so take care not to use those afterward.
* \todo Refactor pcmk_node_t to strdup() the node name.
*/
static void
pe_free_resources(GList *resources)
{
pcmk_resource_t *rsc = NULL;
GList *iterator = resources;
while (iterator != NULL) {
rsc = (pcmk_resource_t *) iterator->data;
iterator = iterator->next;
rsc->fns->free(rsc);
}
if (resources != NULL) {
g_list_free(resources);
}
}
static void
pe_free_actions(GList *actions)
{
GList *iterator = actions;
while (iterator != NULL) {
pe_free_action(iterator->data);
iterator = iterator->next;
}
if (actions != NULL) {
g_list_free(actions);
}
}
static void
pe_free_nodes(GList *nodes)
{
for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
pcmk_node_t *node = (pcmk_node_t *) iterator->data;
// Shouldn't be possible, but to be safe ...
if (node == NULL) {
continue;
}
if (node->details == NULL) {
free(node);
continue;
}
/* This is called after pe_free_resources(), which means that we can't
* use node->details->uname for Pacemaker Remote nodes.
*/
crm_trace("Freeing node %s", (pcmk__is_pacemaker_remote_node(node)?
"(guest or remote)" : pcmk__node_name(node)));
if (node->details->attrs != NULL) {
g_hash_table_destroy(node->details->attrs);
}
if (node->details->utilization != NULL) {
g_hash_table_destroy(node->details->utilization);
}
if (node->details->digest_cache != NULL) {
g_hash_table_destroy(node->details->digest_cache);
}
g_list_free(node->details->running_rsc);
g_list_free(node->details->allocated_rsc);
free(node->details);
free(node);
}
if (nodes != NULL) {
g_list_free(nodes);
}
}
static void
pe__free_ordering(GList *constraints)
{
GList *iterator = constraints;
while (iterator != NULL) {
pcmk__action_relation_t *order = iterator->data;
iterator = iterator->next;
free(order->task1);
free(order->task2);
free(order);
}
if (constraints != NULL) {
g_list_free(constraints);
}
}
static void
pe__free_location(GList *constraints)
{
GList *iterator = constraints;
while (iterator != NULL) {
pcmk__location_t *cons = iterator->data;
iterator = iterator->next;
g_list_free_full(cons->nodes, free);
free(cons->id);
free(cons);
}
if (constraints != NULL) {
g_list_free(constraints);
}
}
/*!
* \brief Reset scheduler data to defaults without freeing it or constraints
*
* \param[in,out] scheduler Scheduler data to reset
*
* \deprecated This function is deprecated as part of the API;
* pe_reset_working_set() should be used instead.
*/
void
cleanup_calculations(pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return;
}
pcmk__clear_scheduler_flags(scheduler, pcmk_sched_have_status);
if (scheduler->config_hash != NULL) {
g_hash_table_destroy(scheduler->config_hash);
}
if (scheduler->singletons != NULL) {
g_hash_table_destroy(scheduler->singletons);
}
if (scheduler->tickets) {
g_hash_table_destroy(scheduler->tickets);
}
if (scheduler->template_rsc_sets) {
g_hash_table_destroy(scheduler->template_rsc_sets);
}
if (scheduler->tags) {
g_hash_table_destroy(scheduler->tags);
}
free(scheduler->dc_uuid);
crm_trace("deleting resources");
pe_free_resources(scheduler->resources);
crm_trace("deleting actions");
pe_free_actions(scheduler->actions);
crm_trace("deleting nodes");
pe_free_nodes(scheduler->nodes);
pe__free_param_checks(scheduler);
g_list_free(scheduler->stop_needed);
free_xml(scheduler->graph);
crm_time_free(scheduler->now);
free_xml(scheduler->input);
free_xml(scheduler->failed);
set_working_set_defaults(scheduler);
CRM_CHECK(scheduler->ordering_constraints == NULL,;
);
CRM_CHECK(scheduler->placement_constraints == NULL,;
);
}
/*!
* \brief Reset scheduler data to default state without freeing it
*
* \param[in,out] scheduler Scheduler data to reset
*/
void
pe_reset_working_set(pcmk_scheduler_t *scheduler)
{
if (scheduler == NULL) {
return;
}
crm_trace("Deleting %d ordering constraints",
g_list_length(scheduler->ordering_constraints));
pe__free_ordering(scheduler->ordering_constraints);
scheduler->ordering_constraints = NULL;
crm_trace("Deleting %d location constraints",
g_list_length(scheduler->placement_constraints));
pe__free_location(scheduler->placement_constraints);
scheduler->placement_constraints = NULL;
crm_trace("Deleting %d colocation constraints",
g_list_length(scheduler->colocation_constraints));
g_list_free_full(scheduler->colocation_constraints, free);
scheduler->colocation_constraints = NULL;
crm_trace("Deleting %d ticket constraints",
g_list_length(scheduler->ticket_constraints));
g_list_free_full(scheduler->ticket_constraints, free);
scheduler->ticket_constraints = NULL;
cleanup_calculations(scheduler);
}
void
set_working_set_defaults(pcmk_scheduler_t *scheduler)
{
void *priv = scheduler->priv;
memset(scheduler, 0, sizeof(pcmk_scheduler_t));
scheduler->priv = priv;
scheduler->order_id = 1;
scheduler->action_id = 1;
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
scheduler->flags = 0x0ULL;
pcmk__set_scheduler_flags(scheduler,
pcmk_sched_symmetric_cluster
|pcmk_sched_stop_removed_resources
|pcmk_sched_cancel_removed_actions);
if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, PCMK_VALUE_TRUE)) {
pcmk__set_scheduler_flags(scheduler, pcmk_sched_concurrent_fencing);
}
}
pcmk_resource_t *
pe_find_resource(GList *rsc_list, const char *id)
{
return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
}
pcmk_resource_t *
pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
{
GList *rIter = NULL;
for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
pcmk_resource_t *parent = rIter->data;
pcmk_resource_t *match =
parent->fns->find_rsc(parent, id, NULL, flags);
if (match != NULL) {
return match;
}
}
crm_trace("No match for %s", id);
return NULL;
}
/*!
* \brief Find a node by name or ID in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id If not NULL, ID of node to find
* \param[in] uname If not NULL, name of node to find
*
* \return Node from \p nodes that matches \p id if any,
* otherwise node from \p nodes that matches \p uname if any,
* otherwise NULL
*/
pcmk_node_t *
pe_find_node_any(const GList *nodes, const char *id, const char *uname)
{
pcmk_node_t *match = NULL;
if (id != NULL) {
match = pe_find_node_id(nodes, id);
}
if ((match == NULL) && (uname != NULL)) {
match = pcmk__find_node_in_list(nodes, uname);
}
return match;
}
/*!
* \brief Find a node by ID in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id ID of node to find
*
* \return Node from \p nodes that matches \p id if any, otherwise NULL
*/
pcmk_node_t *
pe_find_node_id(const GList *nodes, const char *id)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* @TODO Whether node IDs should be considered case-sensitive should
* probably depend on the node type, so functionizing the comparison
* would be worthwhile
*/
if (pcmk__str_eq(node->details->id, id, pcmk__str_casei)) {
return node;
}
}
return NULL;
}
// Deprecated functions kept only for backward API compatibility
// LCOV_EXCL_START
#include <crm/pengine/status_compat.h>
/*!
* \brief Find a node by name in a list of nodes
*
* \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] node_name Name of node to find
*
* \return Node from \p nodes that matches \p node_name if any, otherwise NULL
*/
pcmk_node_t *
pe_find_node(const GList *nodes, const char *node_name)
{
return pcmk__find_node_in_list(nodes, node_name);
}
// LCOV_EXCL_STOP
// End deprecated API
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index de623d4926..28867b878a 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,5166 +1,5202 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/services.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
CRM_TRACE_INIT_DATA(pe_status);
// A (parsed) resource action history entry
struct action_history {
pcmk_resource_t *rsc; // Resource that history is for
pcmk_node_t *node; // Node that history is for
xmlNode *xml; // History entry XML
// Parsed from entry XML
const char *id; // XML ID of history entry
const char *key; // Operation key of action
const char *task; // Action name
const char *exit_reason; // Exit reason given for result
guint interval_ms; // Action interval
int call_id; // Call ID of action
int expected_exit_status; // Expected exit status of action
int exit_status; // Actual exit status of action
int execution_status; // Execution status of action
};
/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
* use pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the
* flag is stringified more readably in log messages.
*/
#define set_config_flag(scheduler, option, flag) do { \
GHashTable *config_hash = (scheduler)->config_hash; \
const char *scf_value = pcmk__cluster_option(config_hash, (option)); \
\
if (scf_value != NULL) { \
if (crm_is_true(scf_value)) { \
(scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} else { \
(scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Scheduler", \
crm_system_name, (scheduler)->flags, \
(flag), #flag); \
} \
} \
} while(0)
static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
xmlNode *xml_op, xmlNode **last_failure,
enum action_fail_response *failed);
static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node);
static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
bool overwrite, pcmk_scheduler_t *scheduler);
static void determine_online_status(const xmlNode *node_state,
pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler);
static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler);
static gboolean
is_dangling_guest_node(pcmk_node_t *node)
{
/* we are looking for a remote-node that was supposed to be mapped to a
* container resource, but all traces of that container have disappeared
* from both the config and the status section. */
if (pcmk__is_pacemaker_remote_node(node)
&& (node->details->remote_rsc != NULL)
&& (node->details->remote_rsc->container == NULL)
&& pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_removed_filler)) {
return TRUE;
}
return FALSE;
}
/*!
* \brief Schedule a fence action for a node
*
* \param[in,out] scheduler Scheduler data
* \param[in,out] node Node to fence
* \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider
* \c PCMK_OPT_PRIORITY_FENCING_DELAY
*/
void
pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk_resource_t *rsc = node->details->remote_rsc->container;
if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
pcmk__node_name(node), reason, rsc->id);
} else {
pcmk__sched_warn("Guest node %s will be fenced "
"(by recovering its guest resource %s): %s",
pcmk__node_name(node), rsc->id, reason);
/* We don't mark the node as unclean because that would prevent the
* node from running resources. We want to allow it to run resources
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
}
} else if (is_dangling_guest_node(node)) {
crm_info("Cleaning up dangling connection for guest node %s: "
"fencing was already done because %s, "
"and guest resource no longer exists",
pcmk__node_name(node), reason);
pcmk__set_rsc_flags(node->details->remote_rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
} else if (pcmk__is_remote_node(node)) {
pcmk_resource_t *rsc = node->details->remote_rsc;
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pcmk__node_name(node), reason);
} else if(node->details->remote_requires_reset == FALSE) {
node->details->remote_requires_reset = TRUE;
pcmk__sched_warn("Remote node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes
pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
reason);
} else {
pcmk__sched_warn("Cluster node %s %s: %s",
pcmk__node_name(node),
pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
}
}
// @TODO xpaths can't handle templates, rules, or id-refs
// nvpair with provides or requires set to unfencing
#define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR \
"[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'" \
"or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') " \
"and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']"
// unfencing in rsc_defaults or any resource
#define XPATH_ENABLE_UNFENCING \
"/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \
"//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR \
"|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \
"/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR
static void
set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
{
xmlXPathObjectPtr result = NULL;
if (!pcmk_is_set(scheduler->flags, flag)) {
result = xpath_search(scheduler->input, xpath);
if (result && (numXpathResults(result) > 0)) {
pcmk__set_scheduler_flags(scheduler, flag);
}
freeXpathObject(result);
}
}
gboolean
unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
guint interval_ms = 0U;
GHashTable *config_hash = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
scheduler->config_hash = config_hash;
pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET, &rule_data,
config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS,
FALSE, scheduler);
pcmk__validate_cluster_options(config_hash);
set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES,
pcmk_sched_probe_resources);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_info("Startup probes: disabled (dangerous)");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT
" is nonzero");
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_fencing);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
scheduler);
value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
pcmk_parse_interval_spec(value, &interval_ms);
if (interval_ms >= INT_MAX) {
scheduler->stonith_timeout = INT_MAX;
} else {
scheduler->stonith_timeout = (int) interval_ms;
}
crm_debug("STONITH timeout: %d", scheduler->stonith_timeout);
set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED,
pcmk_sched_fencing_enabled);
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
crm_debug("STONITH of failed nodes is enabled");
} else {
crm_debug("STONITH of failed nodes is disabled");
}
scheduler->stonith_action = pcmk__cluster_option(config_hash,
PCMK_OPT_STONITH_ACTION);
if (!strcmp(scheduler->stonith_action, PCMK__ACTION_POWEROFF)) {
pcmk__warn_once(pcmk__wo_poweroff,
"Support for " PCMK_OPT_STONITH_ACTION " of "
"'" PCMK__ACTION_POWEROFF "' is deprecated and will be "
"removed in a future release "
"(use '" PCMK_ACTION_OFF "' instead)");
scheduler->stonith_action = PCMK_ACTION_OFF;
}
crm_trace("STONITH will %s nodes", scheduler->stonith_action);
set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING,
pcmk_sched_concurrent_fencing);
if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
crm_debug("Concurrent fencing is enabled");
} else {
crm_debug("Concurrent fencing is disabled");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY);
if (value) {
pcmk_parse_interval_spec(value, &interval_ms);
scheduler->priority_fencing_delay = (int) (interval_ms / 1000);
crm_trace("Priority fencing delay is %ds",
scheduler->priority_fencing_delay);
}
set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES,
pcmk_sched_stop_all);
crm_debug("Stop all active resources: %s",
pcmk__flag_text(scheduler->flags, pcmk_sched_stop_all));
set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER,
pcmk_sched_symmetric_cluster);
if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY);
if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
} else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
} else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
scheduler->no_quorum_policy = pcmk_no_quorum_demote;
} else if (pcmk__str_eq(value, PCMK_VALUE_FENCE_LEGACY, pcmk__str_casei)) {
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
int do_panic = 0;
crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC,
&do_panic);
if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
scheduler->no_quorum_policy = pcmk_no_quorum_fence;
} else {
crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop': cluster has never had quorum");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY
" to 'stop' because fencing is disabled");
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
switch (scheduler->no_quorum_policy) {
case pcmk_no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
case pcmk_no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
case pcmk_no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
case pcmk_no_quorum_fence:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
case pcmk_no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES,
pcmk_sched_stop_removed_resources);
if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
crm_trace("Orphan resources are stopped");
} else {
crm_trace("Orphan resources are ignored");
}
set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS,
pcmk_sched_cancel_removed_actions);
if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) {
crm_trace("Orphan resource actions are stopped");
} else {
crm_trace("Orphan resource actions are ignored");
}
value = pcmk__cluster_option(config_hash, PCMK__OPT_REMOVE_AFTER_STOP);
if (value != NULL) {
if (crm_is_true(value)) {
pcmk__set_scheduler_flags(scheduler, pcmk_sched_remove_after_stop);
pcmk__warn_once(pcmk__wo_remove_after,
"Support for the " PCMK__OPT_REMOVE_AFTER_STOP
" cluster property is deprecated and will be "
"removed in a future release");
} else {
pcmk__clear_scheduler_flags(scheduler,
pcmk_sched_remove_after_stop);
}
}
set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE,
pcmk_sched_in_maintenance);
crm_trace("Maintenance mode: %s",
pcmk__flag_text(scheduler->flags, pcmk_sched_in_maintenance));
set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL,
pcmk_sched_start_failure_fatal);
if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
crm_trace("Start failures are always fatal");
} else {
crm_trace("Start failures are handled by failcount");
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING,
pcmk_sched_startup_fencing);
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
pcmk__warn_once(pcmk__wo_blind,
"Blind faith: not fencing unseen nodes");
}
pe__unpack_node_health_scores(scheduler);
scheduler->placement_strategy =
pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY);
crm_trace("Placement strategy: %s", scheduler->placement_strategy);
set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK,
pcmk_sched_shutdown_lock);
if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT);
pcmk_parse_interval_spec(value, &(scheduler->shutdown_lock));
scheduler->shutdown_lock /= 1000;
crm_trace("Resources will be locked to nodes that were cleanly "
"shut down (locks expire after %s)",
pcmk__readable_interval(scheduler->shutdown_lock));
} else {
crm_trace("Resources will not be locked to nodes that were cleanly "
"shut down");
}
value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT);
pcmk_parse_interval_spec(value, &(scheduler->node_pending_timeout));
scheduler->node_pending_timeout /= 1000;
if (scheduler->node_pending_timeout == 0) {
crm_trace("Do not fence pending nodes");
} else {
crm_trace("Fence pending nodes after %s",
pcmk__readable_interval(scheduler->node_pending_timeout
* 1000));
}
return TRUE;
}
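/* Editor's illustrative sketch (not part of the original source): the interval
 * options above are parsed from CIB strings into milliseconds, so assuming the
 * documented behavior of pcmk_parse_interval_spec(), a configured value would
 * map roughly as follows:
 *
 *   guint ms = 0U;
 *   pcmk_parse_interval_spec("90s", &ms);   // ms == 90000
 *
 * The stonith-timeout result is then capped at INT_MAX, while
 * priority-fencing-delay, shutdown-lock, and node-pending-timeout are
 * converted from milliseconds to whole seconds by dividing by 1000.
 */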
pcmk_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pcmk_scheduler_t *scheduler)
{
pcmk_node_t *new_node = NULL;
if (pcmk_find_node(scheduler, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
new_node = calloc(1, sizeof(pcmk_node_t));
if (new_node == NULL) {
pcmk__sched_err("Could not allocate memory for node %s", uname);
return NULL;
}
new_node->weight = char2score(score);
new_node->details = calloc(1, sizeof(struct pe_node_shared_s));
if (new_node->details == NULL) {
free(new_node);
pcmk__sched_err("Could not allocate memory for node %s", uname);
return NULL;
}
crm_trace("Creating node for entry %s/%s", uname, id);
new_node->details->id = id;
new_node->details->uname = uname;
new_node->details->online = FALSE;
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
new_node->details->data_set = scheduler;
if (pcmk__str_eq(type, PCMK_VALUE_MEMBER,
pcmk__str_null_matches|pcmk__str_casei)) {
new_node->details->type = pcmk_node_variant_cluster;
} else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) {
new_node->details->type = pcmk_node_variant_remote;
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_remote_nodes);
} else {
/* @COMPAT 'ping' is the default for backward compatibility, but it
* should be changed to 'member' at a compatibility break
*/
if (!pcmk__str_eq(type, PCMK__VALUE_PING, pcmk__str_casei)) {
pcmk__config_warn("Node %s has unrecognized type '%s', "
"assuming '" PCMK__VALUE_PING "'",
pcmk__s(uname, "without name"), type);
}
pcmk__warn_once(pcmk__wo_ping_node,
"Support for nodes of type '" PCMK__VALUE_PING "' "
"(such as %s) is deprecated and will be removed in a "
"future release",
pcmk__s(uname, "unnamed node"));
new_node->details->type = node_ping;
}
new_node->details->attrs = pcmk__strkey_table(free, free);
if (pcmk__is_pacemaker_remote_node(new_node)) {
pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "remote");
} else {
pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "cluster");
}
new_node->details->utilization = pcmk__strkey_table(free, free);
new_node->details->digest_cache = pcmk__strkey_table(free,
pe__free_digests);
scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
pe__cmp_node_name);
return new_node;
}
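/* Editor's illustrative sketch (not part of the original source): unpack_nodes()
 * below calls this function for each <node> entry in the CIB, so an entry such
 * as
 *
 *   <node id="101" uname="node1" type="member" score="0"/>
 *
 * would hypothetically result in
 *
 *   pe_create_node("101", "node1", "member", "0", scheduler);
 *
 * with the new node inserted into scheduler->nodes in name-sorted order.
 */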
static const char *
expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
const char *container_id = pcmk__xe_id(xml_obj);
const char *remote_name = NULL;
const char *remote_server = NULL;
const char *remote_port = NULL;
const char *connect_timeout = "60s";
const char *remote_allow_migrate = NULL;
const char *is_managed = NULL;
for (attr_set = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
attr_set != NULL; attr_set = pcmk__xe_next(attr_set)) {
if (!pcmk__xe_is(attr_set, PCMK_XE_META_ATTRIBUTES)) {
continue;
}
for (attr = pcmk__xe_first_child(attr_set, NULL, NULL, NULL);
attr != NULL; attr = pcmk__xe_next(attr)) {
const char *value = crm_element_value(attr, PCMK_XA_VALUE);
const char *name = crm_element_value(attr, PCMK_XA_NAME);
if (name == NULL) { // Sanity
continue;
}
if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) {
remote_name = value;
} else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) {
remote_server = value;
} else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) {
remote_port = value;
} else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) {
connect_timeout = value;
} else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) {
remote_allow_migrate = value;
} else if (strcmp(name, PCMK_META_IS_MANAGED) == 0) {
is_managed = value;
}
}
}
if (remote_name == NULL) {
return NULL;
}
if (pe_find_resource(data->resources, remote_name) != NULL) {
return NULL;
}
pe_create_remote_xml(parent, remote_name, container_id,
remote_allow_migrate, is_managed,
connect_timeout, remote_server, remote_port);
return remote_name;
}
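/* Editor's illustrative sketch (not part of the original source): a primitive
 * that carries the remote-node meta-attribute defines a guest node, e.g.
 *
 *   <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
 *     <meta_attributes id="vm1-meta">
 *       <nvpair id="vm1-remote" name="remote-node" value="guest1"/>
 *     </meta_attributes>
 *   </primitive>
 *
 * For such an entry, the function above would return "guest1" and add an
 * implicit ocf:pacemaker:remote primitive for the guest connection to the
 * parent XML (assuming no resource named "guest1" already exists).
 */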
static void
handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
{
if ((new_node->details->type == pcmk_node_variant_remote)
&& (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
*/
return;
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
} else {
// Blind faith ...
new_node->details->unclean = FALSE;
}
/* We need to be able to determine whether a node's status section exists,
* separately from whether the node is unclean. */
new_node->details->unseen = TRUE;
}
gboolean
unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
pcmk_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
const char *score = NULL;
for (xml_obj = pcmk__xe_first_child(xml_nodes, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
if (pcmk__xe_is(xml_obj, PCMK_XE_NODE)) {
new_node = NULL;
id = crm_element_value(xml_obj, PCMK_XA_ID);
uname = crm_element_value(xml_obj, PCMK_XA_UNAME);
type = crm_element_value(xml_obj, PCMK_XA_TYPE);
score = crm_element_value(xml_obj, PCMK_XA_SCORE);
crm_trace("Processing node %s/%s", uname, id);
if (id == NULL) {
pcmk__config_err("Ignoring <" PCMK_XE_NODE
"> entry in configuration without id");
continue;
}
new_node = pe_create_node(id, uname, type, score, scheduler);
if (new_node == NULL) {
return FALSE;
}
handle_startup_fencing(scheduler, new_node);
add_node_attrs(xml_obj, new_node, FALSE, scheduler);
crm_trace("Done with node %s",
crm_element_value(xml_obj, PCMK_XA_UNAME));
}
}
if (scheduler->localhost
&& (pcmk_find_node(scheduler, scheduler->localhost) == NULL)) {
crm_info("Creating a fake local node");
pe_create_node(scheduler->localhost, scheduler->localhost, NULL, NULL,
scheduler);
}
return TRUE;
}
static void
setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *container_id = NULL;
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) setup_container, scheduler);
return;
}
container_id = g_hash_table_lookup(rsc->meta, PCMK__META_CONTAINER);
if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
pcmk_resource_t *container = pe_find_resource(scheduler->resources,
container_id);
if (container) {
rsc->container = container;
pcmk__set_rsc_flags(container, pcmk_rsc_has_filler);
container->fillers = g_list_append(container->fillers, rsc);
pcmk__rsc_trace(rsc, "Resource %s's container is %s",
rsc->id, container_id);
} else {
pcmk__config_err("Resource %s: Unknown resource container (%s)",
rsc->id, container_id);
}
}
}
gboolean
unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
/* Create remote nodes and guest nodes from the resource configuration
* before unpacking resources.
*/
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
const char *new_node_id = NULL;
/* Check for remote nodes, which are defined by ocf:pacemaker:remote
* primitives.
*/
if (xml_contains_remote_node(xml_obj)) {
new_node_id = pcmk__xe_id(xml_obj);
/* The pcmk_find_node() check ensures we don't iterate over an
* expanded node that has already been added to the node list
*/
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
continue;
}
/* Check for guest nodes, which are defined by special meta-attributes
* of a primitive of any type (for example, VirtualDomain or Xen).
*/
if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) {
/* This will add an ocf:pacemaker:remote primitive to the
* configuration for the guest node's connection, to be unpacked
* later.
*/
new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s",
new_node_id, pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
continue;
}
/* Check for guest nodes inside a group. Clones are currently not
* supported as guest nodes.
*/
if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) {
xmlNode *xml_obj2 = NULL;
for (xml_obj2 = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2)) {
new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
scheduler);
if (new_node_id
&& (pcmk_find_node(scheduler, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, pcmk__xe_id(xml_obj2),
pcmk__xe_id(xml_obj));
pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
NULL, scheduler);
}
}
}
}
return TRUE;
}
/* Call this after all the nodes and resources have been
* unpacked, but before the status section is read.
*
* A remote node's online status is reflected by the state
* of the remote node's connection resource. We need to link
* the remote node to this connection resource so we can have
* easy access to the connection resource during the scheduler calculations.
*/
static void
link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
{
pcmk_node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
remote_node = pcmk_find_node(scheduler, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
new_rsc->id, pcmk__node_name(remote_node));
remote_node->details->remote_rsc = new_rsc;
if (new_rsc->container == NULL) {
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
handle_startup_fencing(scheduler, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
* that we know the node is a guest node, update it correctly.
*/
pcmk__insert_dup(remote_node->details->attrs,
CRM_ATTR_KIND, "container");
}
}
static void
destroy_tag(gpointer data)
{
pcmk_tag_t *tag = data;
if (tag) {
free(tag->id);
g_list_free_full(tag->refs, free);
free(tag);
}
}
/*!
* \internal
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
* \param[in,out] scheduler Scheduler data
*
* \return TRUE
*
* \note unpack_remote_nodes() MUST be called before this, so that the nodes can
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
pcmk_resource_t *new_rsc = NULL;
const char *id = pcmk__xe_id(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
xml_obj->name);
continue;
}
if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) {
if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, NULL) == FALSE) {
/* Record the template's ID so its existence is known, even if nothing references it yet. */
pcmk__insert_dup(scheduler->template_rsc_sets, id, NULL);
}
continue;
}
crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
scheduler) == pcmk_rc_ok) {
scheduler->resources = g_list_append(scheduler->resources, new_rsc);
pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
xml_obj->name, id);
}
}
for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
setup_container(rsc, scheduler);
link_rsc2remotenode(scheduler, rsc);
}
scheduler->resources = g_list_sort(scheduler->resources,
pe__cmp_rsc_priority);
if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* Ignore */
} else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the "
PCMK_OPT_STONITH_ENABLED " option");
pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
}
return TRUE;
}
+/*!
+ * \internal
+ * \brief Parse configuration XML for fencing topology information
+ *
+ * \param[in] xml_fencing_topology Top of fencing topology configuration XML
+ * \param[in,out] scheduler Scheduler data
+ */
+void
+pcmk__unpack_fencing_topology(const xmlNode *xml_fencing_topology, pcmk_scheduler_t *scheduler)
+{
+ xmlNode *xml_obj = NULL;
+ int id = 0;
+
+ for (xml_obj = pcmk__xe_first_child(xml_fencing_topology, PCMK_XE_FENCING_LEVEL, NULL, NULL);
+ xml_obj != NULL; xml_obj = pcmk__xe_next_same(xml_obj)) {
+
+ crm_element_value_int(xml_obj, PCMK_XA_INDEX, &id);
+
+ // Ensure an ID was given
+ if (pcmk__str_empty(pcmk__xe_id(xml_obj))) {
+ pcmk__config_warn("Ignoring registration for topology level without ID");
+ continue;
+ }
+
+ // Ensure level ID is in allowed range
+ if ((id < ST__LEVEL_MIN) || (id > ST__LEVEL_MAX)) {
+ pcmk__config_warn("Ignoring topology registration with invalid level %d",
+ id);
+ continue;
+ }
+
+ }
+}
+
gboolean
unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_tag = NULL;
scheduler->tags = pcmk__strkey_table(free, destroy_tag);
for (xml_tag = pcmk__xe_first_child(xml_tags, NULL, NULL, NULL);
xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag)) {
xmlNode *xml_obj_ref = NULL;
const char *tag_id = pcmk__xe_id(xml_tag);
if (!pcmk__xe_is(xml_tag, PCMK_XE_TAG)) {
continue;
}
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID,
(const char *) xml_tag->name);
continue;
}
for (xml_obj_ref = pcmk__xe_first_child(xml_tag, NULL, NULL, NULL);
xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {
const char *obj_ref = pcmk__xe_id(xml_obj_ref);
if (!pcmk__xe_is(xml_obj_ref, PCMK_XE_OBJ_REF)) {
continue;
}
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID,
xml_obj_ref->name, tag_id);
continue;
}
if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
}
return TRUE;
}
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler)
{
const char *ticket_id = NULL;
const char *granted = NULL;
const char *last_granted = NULL;
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
pcmk_ticket_t *ticket = NULL;
ticket_id = pcmk__xe_id(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
return FALSE;
}
crm_trace("Processing ticket state for %s", ticket_id);
ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
ticket = ticket_new(ticket_id, scheduler);
if (ticket == NULL) {
return FALSE;
}
}
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = pcmk__xml_attr_value(xIter);
if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) {
continue;
}
pcmk__insert_dup(ticket->state, prop_name, prop_value);
}
granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED);
if (granted && crm_is_true(granted)) {
ticket->granted = TRUE;
crm_info("We have ticket '%s'", ticket->id);
} else {
ticket->granted = FALSE;
crm_info("We do not have ticket '%s'", ticket->id);
}
last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED);
if (last_granted) {
long long last_granted_ll;
pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
ticket->last_granted = (time_t) last_granted_ll;
}
standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY);
if (standby && crm_is_true(standby)) {
ticket->standby = TRUE;
if (ticket->granted) {
crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
}
} else {
ticket->standby = FALSE;
}
crm_trace("Done with ticket state for %s", ticket_id);
return TRUE;
}
static gboolean
unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
for (xml_obj = pcmk__xe_first_child(xml_tickets, NULL, NULL, NULL);
xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
if (!pcmk__xe_is(xml_obj, PCMK__XE_TICKET_STATE)) {
continue;
}
unpack_ticket_state(xml_obj, scheduler);
}
return TRUE;
}
static void
unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = NULL;
pcmk_resource_t *rsc = NULL;
if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
return;
}
if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) {
return;
}
crm_trace("Processing Pacemaker Remote node %s",
pcmk__node_name(this_node));
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE),
&(this_node->details->remote_maintenance), 0);
rsc = this_node->details->remote_rsc;
if (this_node->details->remote_requires_reset == FALSE) {
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
}
attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL,
NULL);
add_node_attrs(attrs, this_node, TRUE, scheduler);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pcmk__node_name(this_node));
this_node->details->shutdown = TRUE;
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(this_node));
this_node->details->standby = TRUE;
}
if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))
|| ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) {
crm_info("%s is in maintenance mode", pcmk__node_name(this_node));
this_node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(this_node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__warn_once(pcmk__wo_rdisc_enabled,
"Support for the "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" node attribute is deprecated and will be removed"
" (and behave as 'true') in a future release.");
if (pcmk__is_remote_node(this_node)
&& !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
pcmk__node_name(this_node));
} else {
/* This is either a remote node with fencing enabled, or a guest
* node. We don't care whether fencing is enabled when fencing guest
* nodes, because they are "fenced" by recovering their containing
* resource.
*/
crm_info("%s has resource discovery disabled",
pcmk__node_name(this_node));
this_node->details->rsc_discovery_enabled = FALSE;
}
}
}
/*!
* \internal
* \brief Unpack a cluster node's transient attributes
*
* \param[in] state CIB node state XML
* \param[in,out] node Cluster node whose attributes are being unpacked
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = pcmk__xe_first_child(state,
PCMK__XE_TRANSIENT_ATTRIBUTES,
NULL, NULL);
add_node_attrs(attrs, node, TRUE, scheduler);
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in standby mode", pcmk__node_name(node));
node->details->standby = TRUE;
}
if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
pcmk__rsc_node_current))) {
crm_info("%s is in maintenance mode", pcmk__node_name(node));
node->details->maintenance = TRUE;
}
discovery = pcmk__node_attr(node,
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
NULL, pcmk__rsc_node_current);
if ((discovery != NULL) && !crm_is_true(discovery)) {
pcmk__config_warn("Ignoring "
PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
" attribute for %s because disabling resource"
" discovery is not allowed for cluster nodes",
pcmk__node_name(node));
}
}
/*!
* \internal
* \brief Unpack a node state entry (first pass)
*
* Unpack one node state entry from status. This unpacks information from the
* \c PCMK__XE_NODE_STATE element itself and node attributes inside it, but not
* the resource history inside it. Multiple passes through the status are needed
* to fully unpack everything.
*
* \param[in] state CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *uname = NULL;
pcmk_node_t *this_node = NULL;
id = crm_element_value(state, PCMK_XA_ID);
if (id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without "
PCMK_XA_ID);
crm_log_xml_info(state, "missing-id");
return;
}
uname = crm_element_value(state, PCMK_XA_UNAME);
if (uname == NULL) {
/* If a joining peer causes the cluster to acquire quorum from Corosync
* before it has joined the pacemaker-controld CPG membership, the created
* PCMK__XE_NODE_STATE entry may not have a PCMK_XA_UNAME yet. Recognize
* the node as `pending` and wait for it to join the CPG.
*/
crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" "
"without " PCMK_XA_UNAME,
id);
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
crm_notice("Ignoring recorded state for removed node with name %s and "
PCMK_XA_ID " %s", pcmk__s(uname, "unknown"), id);
return;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
/* We can't determine the online status of Pacemaker Remote nodes until
* after all resource history has been unpacked. In this first pass, we
* do need to mark whether the node has been fenced, as this plays a
* role during unpacking cluster node resource state.
*/
pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED),
&(this_node->details->remote_was_fenced), 0);
return;
}
unpack_transient_attributes(state, this_node, scheduler);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
*/
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
crm_trace("Determining online status of cluster node %s (id %s)",
pcmk__node_name(this_node), id);
determine_online_status(state, this_node, scheduler);
if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& this_node->details->online
&& (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
pe_fence_node(scheduler, this_node, "cluster does not have quorum",
FALSE);
}
}
/*!
* \internal
* \brief Unpack nodes' resource history as much as possible
*
* Unpack as many nodes' resource history as possible in one pass through the
* status. We need to process Pacemaker Remote nodes' connections/containers
* before unpacking their history; the connection/container history will be
* in another node's history, so it might take multiple passes to unpack
* everything.
*
* \param[in] status CIB XML status section
* \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
* \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(const xmlNode *status, bool fence,
pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
// Loop through all PCMK__XE_NODE_STATE entries in CIB status
for (const xmlNode *state = pcmk__xe_first_child(status,
PCMK__XE_NODE_STATE, NULL,
NULL);
state != NULL; state = pcmk__xe_next_same(state)) {
const char *id = pcmk__xe_id(state);
const char *uname = crm_element_value(state, PCMK_XA_UNAME);
pcmk_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history from malformed "
PCMK__XE_NODE_STATE " without id and/or uname");
continue;
}
this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
"no longer in configuration", id);
continue;
}
if (this_node->details->unpacked) {
crm_trace("Not unpacking resource history for node %s because "
"already unpacked", id);
continue;
}
if (fence) {
// We're processing all remaining nodes
} else if (pcmk__is_guest_or_bundle_node(this_node)) {
/* We can unpack a guest node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL) || (rsc->role != pcmk_role_started)
|| (rsc->container->role != pcmk_role_started)) {
crm_trace("Not unpacking resource history for guest node %s "
"because container and connection are not known to "
"be up", id);
continue;
}
} else if (pcmk__is_remote_node(this_node)) {
/* We can unpack a remote node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection is up, with the exception of when shutdown locks are
* in use.
*/
pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL)
|| (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)
&& (rsc->role != pcmk_role_started))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
}
/* If fencing and shutdown locks are disabled and we're not processing
* unseen nodes, then we don't want to unpack offline nodes until online
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
} else if (!pcmk_any_flags_set(scheduler->flags,
pcmk_sched_fencing_enabled
|pcmk_sched_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
continue;
}
if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
unpack_handle_remote_attrs(this_node, state, scheduler);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
this_node->details->unpacked = TRUE;
unpack_node_lrm(this_node, state, scheduler);
rc = EAGAIN; // Other node histories might depend on this one
}
return rc;
}
/* remove nodes that are down, stopping */
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
if (scheduler->tickets == NULL) {
scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL;
state = pcmk__xe_next(state)) {
if (pcmk__xe_is(state, PCMK_XE_TICKETS)) {
unpack_tickets_state((xmlNode *) state, scheduler);
} else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
unpack_node_state(state, scheduler);
}
}
while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
pcmk_is_set(scheduler->flags,
pcmk_sched_fencing_enabled),
scheduler);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
if (scheduler->stop_needed != NULL) {
for (GList *item = scheduler->stop_needed; item; item = item->next) {
pcmk_resource_t *container = item->data;
pcmk_node_t *node = pcmk__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
g_list_free(scheduler->stop_needed);
scheduler->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
pcmk_node_t *this_node = gIter->data;
if (!pcmk__is_pacemaker_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->details->remote_rsc != NULL)) {
pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped,
"remote shutdown");
}
if (!this_node->details->unpacked) {
determine_remote_online_status(scheduler, this_node);
}
}
return TRUE;
}
/*!
* \internal
* \brief Unpack node's time when it became a member at the cluster layer
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
* \param[in,out] scheduler Scheduler data
*
* \return Epoch time when node became a cluster member
* (or scheduler effective time for legacy entries) if a member,
* 0 if not a member, or -1 if no valid information available
*/
static long long
unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
{
const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
int member = 0;
if (member_time == NULL) {
return -1LL;
} else if (crm_str_to_boolean(member_time, &member) == 1) {
/* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
* recorded as a boolean for a DC < 2.1.7, or the node is pending
* shutdown and has left the CPG, in which case it was set to 1 to avoid
* fencing for PCMK_OPT_NODE_PENDING_TIMEOUT.
*
* We return the effective time for in_ccm=1 because what's important to
* avoid fencing is that effective time minus this value is less than
* the pending node timeout.
*/
return member? (long long) get_effective_time(scheduler) : 0LL;
} else {
long long when_member = 0LL;
if ((pcmk__scan_ll(member_time, &when_member,
0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
" in " PCMK__XE_NODE_STATE " entry", member_time);
return -1LL;
}
return when_member;
}
}
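/* Editor's illustrative sketch (not part of the original source): examples of
 * how PCMK__XA_IN_CCM values would be interpreted by unpack_node_member():
 *
 *   in_ccm="0"           -> 0LL (not a cluster member)
 *   in_ccm="1"           -> scheduler effective time (legacy boolean entry)
 *   in_ccm="1700000000"  -> 1700000000LL (epoch time membership began)
 *   attribute missing    -> -1LL (no valid information available)
 */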
/*!
* \internal
* \brief Unpack node's time when it became online in process group
*
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry
*
* \return Epoch time when node became online in process group (or 0 if not
* online, or 1 for legacy online entries)
*/
static long long
unpack_node_online(const xmlNode *node_state)
{
const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD);
// @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE,
pcmk__str_casei|pcmk__str_null_matches)) {
return 0LL;
} else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
return 1LL;
} else {
long long when_online = 0LL;
if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
|| (when_online < 0)) {
crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in "
PCMK__XE_NODE_STATE " entry, assuming offline", peer_time);
return 0LL;
}
return when_online;
}
}
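/* Editor's illustrative sketch (not part of the original source): examples of
 * how PCMK_XA_CRMD values would be interpreted by unpack_node_online():
 *
 *   crmd="offline"       -> 0LL (legacy entry, not in the process group)
 *   crmd="online"        -> 1LL (legacy entry, in the process group)
 *   crmd="1700000000"    -> 1700000000LL (epoch time the peer joined the CPG)
 *   unrecognized value   -> 0LL, with a warning logged
 */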
/*!
* \internal
* \brief Unpack node attribute for user-requested fencing
*
* \param[in] node Node to check
* \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry in CIB status
*
* \return \c true if fencing has been requested for \p node, otherwise \c false
*/
static bool
unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
{
long long value = 0LL;
int value_i = 0;
const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE,
NULL, pcmk__rsc_node_current);
// Value may be boolean or an epoch time
if (crm_str_to_boolean(value_s, &value_i) == 1) {
return (value_i != 0);
}
if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) {
return (value > 0);
}
crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
"node attribute for %s", value_s, pcmk__node_name(node));
return false;
}
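/* Editor's illustrative sketch (not part of the original source): the
 * "terminate" node attribute may be recorded as a boolean or an epoch time,
 * so unpack_node_terminate() would evaluate, for example:
 *
 *   terminate="true"        -> true  (fencing requested)
 *   terminate="0"           -> false (not requested)
 *   terminate="1700000000"  -> true  (positive epoch time)
 */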
static gboolean
determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
gboolean online = FALSE;
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
if (when_member <= 0) {
crm_trace("Node %s is %sdown", pcmk__node_name(this_node),
((when_member < 0)? "presumed " : ""));
} else if (when_online > 0) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
crm_debug("Node %s is not ready to run resources: %s",
pcmk__node_name(this_node), join);
}
} else if (this_node->details->expected_up == FALSE) {
crm_trace("Node %s controller is down: "
"member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
/*!
* \internal
* \brief Check whether a node has taken too long to join controller group
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node to check
* \param[in] when_member Epoch time when node became a cluster member
* \param[in] when_online Epoch time when node joined controller group
*
* \return true if node has been pending (on the way up) longer than
* \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false
* \note This will also update the cluster's recheck time if appropriate.
*/
static inline bool
pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
long long when_member, long long when_online)
{
if ((scheduler->node_pending_timeout > 0)
&& (when_member > 0) && (when_online <= 0)) {
// There is a timeout on pending nodes, and node is pending
time_t timeout = when_member + scheduler->node_pending_timeout;
if (get_effective_time(node->details->data_set) >= timeout) {
return true; // Node has timed out
}
// Node is pending, but still has time
pe__update_recheck_time(timeout, scheduler, "pending node timeout");
}
return false;
}
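/* Editor's illustrative sketch (not part of the original source): with
 * node-pending-timeout=120s, a node that became a member at epoch time T but
 * has not joined the controller process group times out once the effective
 * time reaches T + 120; until then, pending_too_long() moves the cluster
 * recheck time up to T + 120 so the condition is re-evaluated.
 */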
static bool
determine_online_status_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
pcmk_node_t *this_node)
{
bool termination_requested = unpack_node_terminate(this_node, node_state);
const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
long long when_member = unpack_node_member(node_state, scheduler);
long long when_online = unpack_node_online(node_state);
/*
- PCMK__XA_JOIN ::= member|down|pending|banned
- PCMK_XA_EXPECTED ::= member|down
@COMPAT with entries recorded for DCs < 2.1.7
- PCMK__XA_IN_CCM ::= true|false
- PCMK_XA_CRMD ::= online|offline
Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
- PCMK__XA_IN_CCM ::= <timestamp>|0
Epoch time since when the node has been a cluster member. A value of 0 means
the node is not a cluster member.
- PCMK_XA_CRMD ::= <timestamp>|0
Epoch time since when the peer has been online in the CPG. A value of 0 means
the peer is offline in the CPG.
*/
crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
pcmk__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
(termination_requested? " (termination requested)" : ""));
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pcmk__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
return (when_online > 0);
}
if (when_member < 0) {
pe_fence_node(scheduler, this_node,
"peer has not been seen by the cluster", FALSE);
return false;
}
if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
pe_fence_node(scheduler, this_node,
"peer failed Pacemaker membership criteria", FALSE);
} else if (termination_requested) {
if ((when_member <= 0) && (when_online <= 0)
&& pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
crm_info("%s was fenced as requested", pcmk__node_name(this_node));
return false;
}
pe_fence_node(scheduler, this_node, "fencing was requested", false);
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
pcmk__str_null_matches)) {
if (pending_too_long(scheduler, this_node, when_member, when_online)) {
pe_fence_node(scheduler, this_node,
"peer pending timed out on joining the process group",
FALSE);
} else if ((when_member > 0) || (when_online > 0)) {
crm_info("- %s is not ready to run resources",
pcmk__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
crm_trace("%s is down or still coming up",
pcmk__node_name(this_node));
}
} else if (when_member <= 0) {
// Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes
pe_fence_node(scheduler, this_node,
"peer is no longer part of the cluster", TRUE);
} else if (when_online <= 0) {
pe_fence_node(scheduler, this_node,
"peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
} else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
crm_info("%s is active", pcmk__node_name(this_node));
} else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources",
pcmk__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
pe_fence_node(scheduler, this_node, "peer was in an unknown state",
FALSE);
}
return (when_member > 0);
}
static void
determine_remote_online_status(pcmk_scheduler_t *scheduler,
pcmk_node_t *this_node)
{
pcmk_resource_t *rsc = this_node->details->remote_rsc;
pcmk_resource_t *container = NULL;
pcmk_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
* be NULL. Consider it an offline remote node in that case.
*/
if (rsc == NULL) {
this_node->details->online = FALSE;
goto remote_online_done;
}
container = rsc->container;
if (container && pcmk__list_of_1(rsc->running_on)) {
host = rsc->running_on->data;
}
/* If the resource is currently started, mark it online. */
if (rsc->role == pcmk_role_started) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
if ((rsc->role == pcmk_role_started)
&& (rsc->next_role == pcmk_role_stopped)) {
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
} else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
} else if ((rsc->role == pcmk_role_stopped)
|| ((container != NULL)
&& (container->role == pcmk_role_stopped))) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = FALSE;
} else if (host && (host->details->online == FALSE)
&& host->details->unclean) {
crm_trace("Guest node %s UNCLEAN because host is unclean",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
}
remote_online_done:
crm_trace("Remote node %s online=%s",
this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
}
static void
determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
pcmk_scheduler_t *scheduler)
{
gboolean online = FALSE;
const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
CRM_CHECK(this_node != NULL, return);
this_node->details->shutdown = FALSE;
this_node->details->expected_up = FALSE;
if (pe__shutdown_requested(this_node)) {
this_node->details->shutdown = TRUE;
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
this_node->details->expected_up = TRUE;
}
if (this_node->details->type == node_ping) {
this_node->details->unclean = FALSE;
online = FALSE; /* As far as resource management is concerned,
* the node is safely offline.
* Anyone caught abusing this logic will be shot
*/
} else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
online = determine_online_status_no_fencing(scheduler, node_state,
this_node);
} else {
online = determine_online_status_fencing(scheduler, node_state,
this_node);
}
if (online) {
this_node->details->online = TRUE;
} else {
/* remove node from contention */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -PCMK_SCORE_INFINITY;
}
if (online && this_node->details->shutdown) {
/* don't run resources here */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -PCMK_SCORE_INFINITY;
}
if (this_node->details->type == node_ping) {
crm_info("%s is not a Pacemaker node", pcmk__node_name(this_node));
} else if (this_node->details->unclean) {
pcmk__sched_warn("%s is unclean", pcmk__node_name(this_node));
} else if (this_node->details->online) {
crm_info("%s is %s", pcmk__node_name(this_node),
this_node->details->shutdown ? "shutting down" :
this_node->details->pending ? "pending" :
this_node->details->standby ? "standby" :
this_node->details->maintenance ? "maintenance" : "online");
} else {
crm_trace("%s is offline", pcmk__node_name(this_node));
}
}
/*!
* \internal
* \brief Find the end of a resource's name, excluding any clone suffix
*
* \param[in] id Resource ID to check
*
* \return Pointer to last character of resource's base name
*/
const char *
pe_base_name_end(const char *id)
{
if (!pcmk__str_empty(id)) {
const char *end = id + strlen(id) - 1;
for (const char *s = end; s > id; --s) {
switch (*s) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
break;
case ':':
return (s == end)? s : (s - 1);
default:
return end;
}
}
return end;
}
return NULL;
}
/*!
* \internal
* \brief Get a resource name excluding any clone suffix
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_strip(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
char *basename = NULL;
CRM_ASSERT(end);
basename = strndup(last_rsc_id, end - last_rsc_id + 1);
CRM_ASSERT(basename);
return basename;
}
/*!
* \internal
* \brief Get the name of the first instance of a cloned resource
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name plus :0
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_zero(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
size_t base_name_len = end - last_rsc_id + 1;
char *zero = NULL;
CRM_ASSERT(end);
zero = pcmk__assert_alloc(base_name_len + 3, sizeof(char));
memcpy(zero, last_rsc_id, base_name_len);
zero[base_name_len] = ':';
zero[base_name_len + 1] = '0';
return zero;
}
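/* Editor's illustrative sketch (not part of the original source): assuming the
 * helpers above behave as documented, a caller would see roughly:
 *
 *   const char *end = pe_base_name_end("galera:2"); // last 'a' of "galera"
 *   char *base = clone_strip("galera:2");           // "galera" (must free())
 *   char *zero = clone_zero("galera");              // "galera:0" (must free())
 */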
static pcmk_resource_t *
create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
xmlNode *xml_rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE);
pcmk__xe_copy_attrs(xml_rsc, rsc_entry, pcmk__xaf_none);
crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
pcmk_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
node = pcmk_find_node(scheduler, rsc_id);
if (node == NULL) {
node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, NULL,
scheduler);
}
link_rsc2remotenode(scheduler, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
node->details->shutdown = TRUE;
}
}
if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
pcmk__set_rsc_flags(rsc, pcmk_rsc_removed_filler);
}
pcmk__set_rsc_flags(rsc, pcmk_rsc_removed);
scheduler->resources = g_list_append(scheduler->resources, rsc);
return rsc;
}
/*!
* \internal
* \brief Create orphan instance for anonymous clone resource history
*
* \param[in,out] parent Clone resource that orphan will be added to
* \param[in] rsc_id Orphan's resource ID
* \param[in] node Where orphan is active (for logging only)
* \param[in,out] scheduler Scheduler data
*
* \return Newly added orphaned instance of \p parent
*/
static pcmk_resource_t *
create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
// find_rsc() because we might be a cloned group
pcmk_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL,
pcmk_rsc_match_clone_only);
pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pcmk__node_name(node));
return orphan;
}
/*!
* \internal
* \brief Check a node for an instance of an anonymous clone
*
* Return a child instance of the specified anonymous clone, in order of
* preference: (1) the instance running on the specified node, if any;
* (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX
* instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX
* instances are already active).
*
* \param[in,out] scheduler Scheduler data
* \param[in] node Node on which to check for instance
* \param[in,out] parent Clone to check
* \param[in] rsc_id Name of cloned resource in history (no instance)
*/
static pcmk_resource_t *
find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
pcmk_resource_t *parent, const char *rsc_id)
{
GList *rIter = NULL;
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(pcmk__is_anonymous_clone(parent));
// Check for active (or partially active, for cloned groups) instance
pcmk__rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pcmk__node_name(node), parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GList *locations = NULL;
pcmk_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
* for a resource before the resource's individual operation history
* entries are unpacked, locations will generally not contain the
* desired node.
*
* However, there are three exceptions:
* (1) when child is a cloned group and we have already unpacked the
* history of another member of the group on the same node;
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if
* PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and
* (3) when we re-run calculations on the same scheduler data as part of
* a simulation.
*/
child->fns->location(child, &locations, 2);
if (locations) {
/* We should never associate the same numbered anonymous clone
* instance with multiple nodes, and clone instances can't migrate,
* so there must be only one location, regardless of history.
*/
CRM_LOG_ASSERT(locations->next == NULL);
if (pcmk__same_node((pcmk_node_t *) locations->data, node)) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
* need the particular member corresponding to rsc_id.
*
* If the history entry is orphaned, rsc will be NULL.
*/
rsc = parent->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
* happen if PCMK_META_GLOBALLY_UNIQUE is switched from true
* to false), we want to consider the instances beyond the
* first as orphans, even if there are inactive instance
* numbers available.
*/
if (rsc->running_on) {
crm_notice("Active (now-)anonymous clone %s has "
"multiple (orphan) instance histories on %s",
parent->id, pcmk__node_name(node));
skip_inactive = TRUE;
rsc = NULL;
} else {
pcmk__rsc_trace(parent, "Resource %s, active", rsc->id);
}
}
}
g_list_free(locations);
} else {
pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
&& !pcmk_is_set(child->flags, pcmk_rsc_blocked)) {
// Remember one inactive instance in case we don't find active
inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
pcmk_rsc_match_clone_only);
/* ... but don't use it if it was already associated with a
* pending action on another node
*/
if ((inactive_instance != NULL) &&
(inactive_instance->pending_node != NULL) &&
!pcmk__same_node(inactive_instance->pending_node, node)) {
inactive_instance = NULL;
}
}
}
}
if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
pcmk__rsc_trace(parent, "Resource %s, empty slot",
inactive_instance->id);
rsc = inactive_instance;
}
/* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or
* PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we
* don't want to consume a valid instance number for unclean nodes. Such
* instances may appear to be active according to the history, but should be
* considered inactive, so we can start an instance elsewhere. Treat such
* instances as orphans.
*
* An exception is instances running on guest nodes -- since guest node
* "fencing" is actually just a resource stop, requires shouldn't apply.
*
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pcmk__is_guest_or_bundle_node(node)
&& !pe__is_universal_clone(parent, scheduler)) {
rsc = NULL;
}
if (rsc == NULL) {
rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
static pcmk_resource_t *
unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
const char *rsc_id)
{
pcmk_resource_t *rsc = NULL;
pcmk_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
* check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0,
* we create a single :0 orphan to match against here.
*/
char *clone0_id = clone_zero(rsc_id);
pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources,
clone0_id);
if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
} else {
crm_trace("%s is not known as %s either (orphan)",
rsc_id, clone0_id);
}
free(clone0_id);
} else if (rsc->variant > pcmk_rsc_variant_primitive) {
crm_trace("Resource history for %s is orphaned because it is no longer primitive",
rsc_id);
return NULL;
} else {
parent = uber_parent(rsc);
}
if (pcmk__is_anonymous_clone(parent)) {
if (pcmk__is_bundled(parent)) {
rsc = pe__find_bundle_replica(parent->parent, node);
} else {
char *base = clone_strip(rsc_id);
rsc = find_anonymous_clone(scheduler, node, parent, base);
free(base);
CRM_ASSERT(rsc != NULL);
}
}
if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_none)
&& !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_none)) {
pcmk__str_update(&rsc->clone_name, rsc_id);
pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pcmk__node_name(node), rsc->id,
pcmk_is_set(rsc->flags, pcmk_rsc_removed)? " (ORPHAN)" : "");
}
return rsc;
}
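/*!
* \internal
* \brief Create a resource object for an orphaned history entry
*
* Create a fake resource for history that has no matching configuration. If
* PCMK_OPT_STOP_ORPHAN_RESOURCES is disabled, mark the orphan unmanaged;
* otherwise ban it from running anywhere so it will be stopped.
*
* \param[in]     rsc_entry  Resource history XML with no matching configuration
* \param[in]     node       Node that the history entry belongs to
* \param[in,out] scheduler  Scheduler data
*
* \return Newly created orphan resource, or NULL on error
*/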
static pcmk_resource_t *
process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
pcmk_scheduler_t *scheduler)
{
pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
crm_debug("Detected orphan resource %s on %s",
rsc_id, pcmk__node_name(node));
rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
if (rsc == NULL) {
return NULL;
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id);
resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
"__orphan_do_not_run__", scheduler);
}
return rsc;
}
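/*!
* \internal
* \brief Update scheduler state given a resource's known state on a node
*
* If a managed resource appears active on an offline or unclean node, schedule
* fencing where appropriate, apply the configured failure handling
* (\p on_fail), and record the resource as running on the node (or clear a
* stale clone name if the resource is stopped).
*
* \param[in,out] rsc      Resource whose history is being processed
* \param[in,out] node     Node that the history belongs to
* \param[in]     on_fail  Failure handling determined from operation history
*/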
static void
process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
enum action_fail_response on_fail)
{
pcmk_node_t *tmpnode = NULL;
char *reason = NULL;
enum action_fail_response save_on_fail = pcmk_on_fail_ignore;
CRM_ASSERT(rsc);
pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
rsc->id, pcmk_role_text(rsc->role), pcmk__node_name(node),
pcmk_on_fail_text(on_fail));
/* process current state */
if (rsc->role != pcmk_role_unknown) {
pcmk_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
pcmk_node_t *n = pe__copy_node(node);
pcmk__rsc_trace(rsc, "%s%s%s known on %s",
rsc->id,
((rsc->clone_name == NULL)? "" : " also known as "),
((rsc->clone_name == NULL)? "" : rsc->clone_name),
pcmk__node_name(n));
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) {
break;
}
iter = iter->parent;
}
}
/* If a managed resource is believed to be running, but node is down ... */
if ((rsc->role > pcmk_role_stopped)
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
&& pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
gboolean should_fence = FALSE;
/* If this is a guest node, fence it (regardless of whether fencing is
* enabled, because guest node fencing is done by recovery of the
* container resource rather than by the fencer). Mark the resource
* we're processing as failed. When the guest comes back up, its
* operation history in the CIB will be cleared, freeing the affected
* resource to run again once we are sure we know its state.
*/
if (pcmk__is_guest_or_bundle_node(node)) {
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
should_fence = TRUE;
} else if (pcmk_is_set(rsc->cluster->flags,
pcmk_sched_fencing_enabled)) {
if (pcmk__is_remote_node(node)
&& (node->details->remote_rsc != NULL)
&& !pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
* somewhere. This allows connection resources on a failed
* cluster node to move to another node without requiring the
* remote nodes to be fenced as well.
*/
node->details->unseen = TRUE;
reason = crm_strdup_printf("%s is active there (fencing will be"
" revoked if remote connection can "
"be re-established elsewhere)",
rsc->id);
}
should_fence = TRUE;
}
if (should_fence) {
if (reason == NULL) {
reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
}
pe_fence_node(rsc->cluster, node, reason, FALSE);
}
free(reason);
}
/* Save the failure information so native_add_running() can calculate priority_fencing_delay correctly. */
save_on_fail = on_fail;
if (node->details->unclean) {
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
on_fail = pcmk_on_fail_ignore;
}
switch (on_fail) {
case pcmk_on_fail_ignore:
/* nothing to do */
break;
case pcmk_on_fail_demote:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed);
demote_action(rsc, node, FALSE);
break;
case pcmk_on_fail_fence_node:
/* treat it as if it is still running
* but also mark the node as unclean
*/
reason = crm_strdup_printf("%s failed there", rsc->id);
pe_fence_node(rsc->cluster, node, reason, FALSE);
free(reason);
break;
case pcmk_on_fail_standby_node:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
case pcmk_on_fail_block:
/* is_managed == FALSE will prevent any
* actions from being sent for the resource
*/
pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(rsc, pcmk_rsc_blocked);
break;
case pcmk_on_fail_ban:
/* make sure it comes up somewhere else
* or not at all
*/
resource_location(rsc, node, -PCMK_SCORE_INFINITY,
"__action_migration_auto__", rsc->cluster);
break;
case pcmk_on_fail_stop:
pe__set_next_role(rsc, pcmk_role_stopped,
PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP);
break;
case pcmk_on_fail_restart:
if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
stop_action(rsc, node, FALSE);
}
break;
case pcmk_on_fail_restart_container:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if ((rsc->container != NULL) && pcmk__is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
* container is running yet, so remember it and add a stop
* action for it later.
*/
rsc->cluster->stop_needed =
g_list_prepend(rsc->cluster->stop_needed, rsc->container);
} else if (rsc->container) {
stop_action(rsc->container, node, FALSE);
} else if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
stop_action(rsc, node, FALSE);
}
break;
case pcmk_on_fail_reset_remote:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pcmk_find_node(rsc->cluster, rsc->id);
}
if (pcmk__is_remote_node(tmpnode)
&& !(tmpnode->details->remote_was_fenced)) {
/* The remote connection resource failed in a way that
* should result in fencing the remote node.
*/
pe_fence_node(rsc->cluster, tmpnode,
"remote connection is unrecoverable", FALSE);
}
}
/* Require the stop action regardless of whether fencing is occurring */
if (rsc->role > pcmk_role_stopped) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_ms) {
pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
}
break;
}
/* Ensure a remote node connection failure forces an unclean remote node
* to be fenced. By setting unseen = FALSE, the remote node failure will
* result in a fencing operation regardless of whether we're going to
* attempt to reconnect to the remote node in this transition. */
if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) {
tmpnode = pcmk_find_node(rsc->cluster, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
if ((rsc->role != pcmk_role_stopped)
&& (rsc->role != pcmk_role_unknown)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Removed resource %s is active on %s and will be "
"stopped when possible",
rsc->id, pcmk__node_name(node));
} else {
crm_notice("Removed resource %s must be stopped manually on %s "
"because " PCMK_OPT_STOP_ORPHAN_RESOURCES
" is set to false", rsc->id, pcmk__node_name(node));
}
}
native_add_running(rsc, node, rsc->cluster,
(save_on_fail != pcmk_on_fail_ignore));
switch (on_fail) {
case pcmk_on_fail_ignore:
break;
case pcmk_on_fail_demote:
case pcmk_on_fail_block:
pcmk__set_rsc_flags(rsc, pcmk_rsc_failed);
break;
default:
pcmk__set_rsc_flags(rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
break;
}
} else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
/* Only do this for older status sections that included instance numbers.
* Otherwise, stopped instances will appear as orphans.
*/
pcmk__rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)",
rsc->clone_name, rsc->id);
free(rsc->clone_name);
rsc->clone_name = NULL;
} else {
GList *possible_matches = pe__resource_actions(rsc, node,
PCMK_ACTION_STOP, FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
pcmk__set_action_flags(stop, pcmk_action_optional);
}
g_list_free(possible_matches);
}
/* A successful stop after migrate_to on the migration source doesn't make
* the partially migrated resource stopped on the migration target.
*/
if ((rsc->role == pcmk_role_stopped)
&& rsc->partial_migration_source
&& rsc->partial_migration_source->details == node->details
&& rsc->partial_migration_target
&& rsc->running_on) {
rsc->role = pcmk_role_started;
}
}
/* create active recurring operations as optional */
static void
process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
int start_index, int stop_index,
GList *sorted_op_list, pcmk_scheduler_t *scheduler)
{
int counter = -1;
const char *task = NULL;
const char *status = NULL;
GList *gIter = sorted_op_list;
CRM_ASSERT(rsc);
pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d",
rsc->id, start_index, stop_index);
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
guint interval_ms = 0;
char *key = NULL;
const char *id = pcmk__xe_id(rsc_op);
counter++;
if (node->details->online == FALSE) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline",
rsc->id, pcmk__node_name(node));
break;
/* Need to check if there's a monitor for role="Stopped" */
} else if (start_index < stop_index && counter <= stop_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active",
id, pcmk__node_name(node));
continue;
} else if (counter < start_index) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d",
id, pcmk__node_name(node), counter);
continue;
}
crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
if (interval_ms == 0) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring",
id, pcmk__node_name(node));
continue;
}
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
pcmk__rsc_trace(rsc, "Skipping %s on %s: status",
id, pcmk__node_name(node));
continue;
}
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node));
custom_action(rsc, key, task, node, TRUE, scheduler);
}
}
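/*!
* \brief Find the bounds of the "active" portion of a sorted operation history
*
* Set \p *stop_index to the index of the last completed stop in
* \p sorted_op_list, and \p *start_index to the index of the last start (or
* migrate_from). If no start is found, fall back to the last promote/demote,
* or to a successful monitor after the last stop, since those imply the
* resource was started.
*
* For example, for a sorted history of
*   0:start  1:monitor  2:stop  3:start
* *stop_index ends up as 2 and *start_index as 3, so only entries from index 3
* onward describe the currently active instance of the resource.
*
* \param[in]  sorted_op_list  Resource history entries sorted by call ID
* \param[out] start_index     Where to store index of last start-like action
* \param[out] stop_index      Where to store index of last completed stop
*/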
void
calculate_active_ops(const GList *sorted_op_list, int *start_index,
int *stop_index)
{
int counter = -1;
int implied_monitor_start = -1;
int implied_clone_start = -1;
const char *task = NULL;
const char *status = NULL;
*stop_index = -1;
*start_index = -1;
for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
const xmlNode *rsc_op = (const xmlNode *) iter->data;
counter++;
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
*start_index = counter;
} else if ((implied_monitor_start <= *stop_index)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
} else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
PCMK_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
if (*start_index == -1) {
if (implied_clone_start != -1) {
*start_index = implied_clone_start;
} else if (implied_monitor_start != -1) {
*start_index = implied_monitor_start;
}
}
}
// If resource history entry has shutdown lock, remember lock node and time
static void
unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
if ((scheduler->shutdown_lock > 0)
&& (get_effective_time(scheduler)
> (lock_time + scheduler->shutdown_lock))) {
pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pcmk__node_name(node));
pe__clear_resource_history(rsc, node);
} else {
/* @COMPAT I don't like breaking const signatures, but
* rsc->lock_node should really be const -- we just can't change it
* until the next API compatibility break.
*/
rsc->lock_node = (pcmk_node_t *) node;
rsc->lock_time = lock_time;
}
}
}
/*!
* \internal
* \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] lrm_resource \c PCMK__XE_LRM_RESOURCE XML being unpacked
* \param[in,out] scheduler Scheduler data
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
static pcmk_resource_t *
unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
pcmk_scheduler_t *scheduler)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
enum rsc_role_e req_role = pcmk_role_unknown;
const char *rsc_id = pcmk__xe_id(lrm_resource);
pcmk_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
enum action_fail_response on_fail = pcmk_on_fail_ignore;
enum rsc_role_e saved_role = pcmk_role_unknown;
if (rsc_id == NULL) {
pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE
" entry: No " PCMK_XA_ID);
crm_log_xml_info(lrm_resource, "missing-id");
return NULL;
}
crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s",
rsc_id, pcmk__node_name(node));
/* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort
* them
*/
for (rsc_op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP, NULL,
NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next_same(rsc_op)) {
op_list = g_list_prepend(op_list, rsc_op);
}
if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
}
}
/* find the resource */
rsc = unpack_find_resource(scheduler, node, rsc_id);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
rsc = process_orphan_resource(lrm_resource, node, scheduler);
}
}
CRM_ASSERT(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
}
/* process operations */
saved_role = rsc->role;
rsc->role = pcmk_role_unknown;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail);
}
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
scheduler);
/* no need to free the contents */
g_list_free(sorted_op_list);
process_rsc_state(rsc, node, on_fail);
if (get_target_role(rsc, &req_role)) {
if ((rsc->next_role == pcmk_role_unknown)
|| (req_role < rsc->next_role)) {
pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE);
} else if (req_role > rsc->next_role) {
pcmk__rsc_info(rsc,
"%s: Not overwriting calculated next role %s"
" with requested next role %s",
rsc->id, pcmk_role_text(rsc->next_role),
pcmk_role_text(req_role));
}
}
if (saved_role > rsc->role) {
rsc->role = saved_role;
}
return rsc;
}
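/*!
* \internal
* \brief Map orphaned container fillers to their container resources
*
* For each \c PCMK__XE_LRM_RESOURCE entry with a \c PCMK__META_CONTAINER
* attribute, if the named container exists and the filler is an orphan that
* has no container set yet, link the two so the filler is handled along with
* its container.
*
* \param[in]     lrm_rsc_list  \c PCMK__XE_LRM_RESOURCES XML from node history
* \param[in,out] scheduler     Scheduler data
*/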
static void
handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
pcmk_scheduler_t *scheduler)
{
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list, NULL,
NULL, NULL);
rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
pcmk_resource_t *rsc;
pcmk_resource_t *container;
const char *rsc_id;
const char *container_id;
if (!pcmk__xe_is(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
continue;
}
container_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER);
rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
if (container_id == NULL || rsc_id == NULL) {
continue;
}
container = pe_find_resource(scheduler->resources, container_id);
if (container == NULL) {
continue;
}
rsc = pe_find_resource(scheduler->resources, rsc_id);
if ((rsc == NULL) || (rsc->container != NULL)
|| !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
continue;
}
pcmk__rsc_trace(rsc, "Mapped container of orphaned resource %s to %s",
rsc->id, container_id);
rsc->container = container;
container->fillers = g_list_append(container->fillers, rsc);
}
}
/*!
* \internal
* \brief Unpack one node's lrm status section
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] xml CIB node state XML
* \param[in,out] scheduler Scheduler data
*/
static void
unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
pcmk_scheduler_t *scheduler)
{
bool found_orphaned_container_filler = false;
// Drill down to PCMK__XE_LRM_RESOURCES section
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM, NULL, NULL);
if (xml == NULL) {
return;
}
xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL);
if (xml == NULL) {
return;
}
// Unpack each PCMK__XE_LRM_RESOURCE entry
for (const xmlNode *rsc_entry = pcmk__xe_first_child(xml,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL; rsc_entry = pcmk__xe_next_same(rsc_entry)) {
pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
if ((rsc != NULL)
&& pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
found_orphaned_container_filler = true;
}
}
/* Now that all resource state has been unpacked for this node, map any
* orphaned container fillers to their container resource.
*/
if (found_orphaned_container_filler) {
handle_orphaned_container_fillers(xml, scheduler);
}
}
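// Set a resource's role to started (or unpromoted if its top-level parent is promotable)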
static void
set_active(pcmk_resource_t *rsc)
{
const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) {
rsc->role = pcmk_role_unpromoted;
} else {
rsc->role = pcmk_role_started;
}
}
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
pcmk_node_t *node = value;
int *score = user_data;
node->weight = *score;
}
#define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE
#define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM \
"/" PCMK__XE_LRM_RESOURCES \
"/" PCMK__XE_LRM_RESOURCE
#define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP
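/*!
* \internal
* \brief Find a resource history operation entry in the CIB status section
*
* Build an XPath query for the given resource, operation, and node (optionally
* restricted to a particular migration peer), and return the matching
* \c PCMK__XE_LRM_RSC_OP element. If \p target_rc is non-negative, return the
* entry only if it completed with that return code.
*
* With the usual CIB element names, the generated XPath looks roughly like
* (illustrative only):
*   /cib/status/node_state[@uname='node1']
*     /lrm/lrm_resources/lrm_resource[@id='myrsc']
*       /lrm_rsc_op[@operation='monitor']
*
* \param[in]     resource   Resource history ID to search for
* \param[in]     op         Operation name to search for
* \param[in]     node       Node name to search on
* \param[in]     source     If not NULL, for migration actions, the other node
*                           involved (matched against the migration source or
*                           target attribute as appropriate)
* \param[in]     target_rc  Expected return code, or a negative value to ignore
* \param[in,out] scheduler  Scheduler data
*
* \return Matching operation history XML, or NULL if none
*/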
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
int target_rc, pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL),
return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']"
SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'",
NULL);
/* Need to check against transition_magic too? */
if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_TARGET "='", source, "']",
NULL);
} else if ((source != NULL)
&& (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
pcmk__g_strcat(xpath,
" and @" PCMK__META_MIGRATE_SOURCE "='", source, "']",
NULL);
} else {
g_string_append_c(xpath, ']');
}
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
if (xml && target_rc >= 0) {
int rc = PCMK_OCF_UNKNOWN_ERROR;
int status = PCMK_EXEC_ERROR;
crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc);
crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status);
if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) {
return NULL;
}
}
return xml;
}
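/*!
* \internal
* \brief Find a resource's history entry in the CIB status section
*
* \param[in]     rsc_id     Resource history ID to search for
* \param[in]     node_name  Node name to search on
* \param[in,out] scheduler  Scheduler data
*
* \return Matching \c PCMK__XE_LRM_RESOURCE XML, or NULL if none found
*/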
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']",
NULL);
xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
return xml;
}
/*!
* \internal
* \brief Check whether a resource has no completed action history on a node
*
* \param[in,out] rsc Resource to check
* \param[in] node_name Node to check
*
* \return true if \p rsc is unknown on \p node_name, otherwise false
*/
static bool
unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
{
bool result = false;
xmlXPathObjectPtr search;
char *xpath = NULL;
xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"
SUB_XPATH_LRM_RSC_OP
"[@" PCMK__XA_RC_CODE "!='%d']",
node_name, rsc->id, PCMK_OCF_UNKNOWN);
search = xpath_search(rsc->cluster->input, xpath);
result = (numXpathResults(search) == 0);
freeXpathObject(search);
free(xpath);
return result;
}
/*!
* \brief Check whether a probe/monitor indicating the resource was not running
* on a node happened after some event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
* \param[in,out] scheduler Scheduler data
*
* \return true if such a monitor happened after the event, false otherwise
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
pcmk_scheduler_t *scheduler)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
NULL, PCMK_OCF_NOT_RUNNING, scheduler);
return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
}
/*!
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that non-monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
* \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after the event, false otherwise
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
pcmk_scheduler_t *scheduler)
{
xmlNode *lrm_resource = NULL;
lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
if (lrm_resource == NULL) {
return false;
}
for (xmlNode *op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP,
NULL, NULL);
op != NULL; op = pcmk__xe_next_same(op)) {
const char * task = NULL;
if (op == xml_op) {
continue;
}
task = crm_element_value(op, PCMK_XA_OPERATION);
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
NULL)
&& pe__is_newer_op(op, xml_op, same_node) > 0) {
return true;
}
}
return false;
}
/*!
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] migrate_to Any migrate_to event that is being compared to
* \param[in] migrate_from Any migrate_from event that is being compared to
* \param[in,out] scheduler Scheduler data
*
* \return true if the resource has newer state on the node after the
*         migration events, false otherwise
*/
static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
const xmlNode *migrate_to,
const xmlNode *migrate_from,
pcmk_scheduler_t *scheduler)
{
const xmlNode *xml_op = migrate_to;
const char *source = NULL;
const char *target = NULL;
bool same_node = false;
if (migrate_from) {
xml_op = migrate_from;
}
source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE);
target = crm_element_value(xml_op, PCMK__META_MIGRATE_TARGET);
/* It's preferred to compare to the migration event on the same node if one
* exists, since call IDs are more reliable.
*/
if (pcmk__str_eq(node_name, target, pcmk__str_casei)) {
if (migrate_from) {
xml_op = migrate_from;
same_node = true;
} else {
xml_op = migrate_to;
}
} else if (pcmk__str_eq(node_name, source, pcmk__str_casei)) {
if (migrate_to) {
xml_op = migrate_to;
same_node = true;
} else {
xml_op = migrate_from;
}
}
/* If there's any newer non-monitor operation on the node, or any newer
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler)
|| monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
scheduler);
}
/*!
* \internal
* \brief Parse migration source and target node names from history entry
*
* \param[in] entry Resource history entry for a migration action
* \param[in] source_node If not NULL, source must match this node
* \param[in] target_node If not NULL, target must match this node
* \param[out] source_name Where to store migration source node name
* \param[out] target_name Where to store migration target node name
*
* \return Standard Pacemaker return code
*/
static int
get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
const pcmk_node_t *target_node,
const char **source_name, const char **target_name)
{
*source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE);
*target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET);
if ((*source_name == NULL) || (*target_name == NULL)) {
pcmk__config_err("Ignoring resource history entry %s without "
PCMK__META_MIGRATE_SOURCE " and "
PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry));
return pcmk_rc_unpack_error;
}
if ((source_node != NULL)
&& !pcmk__str_eq(*source_name, source_node->details->uname,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_SOURCE "='%s' does not match %s",
pcmk__xe_id(entry), *source_name,
pcmk__node_name(source_node));
return pcmk_rc_unpack_error;
}
if ((target_node != NULL)
&& !pcmk__str_eq(*target_name, target_node->details->uname,
pcmk__str_casei|pcmk__str_null_matches)) {
pcmk__config_err("Ignoring resource history entry %s because "
PCMK__META_MIGRATE_TARGET "='%s' does not match %s",
pcmk__xe_id(entry), *target_name,
pcmk__node_name(target_node));
return pcmk_rc_unpack_error;
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Add a migration source to a resource's list of dangling migrations
*
* If the migrate_to and migrate_from actions in a live migration both
* succeeded, but there is no stop on the source, the migration is considered
* "dangling." Add the source to the resource's dangling migration list, which
* will be used to schedule a stop on the source without affecting the target.
*
* \param[in,out] rsc Resource involved in migration
* \param[in] node Migration source
*/
static void
add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
rsc->id, pcmk__node_name(node));
rsc->role = pcmk_role_stopped;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
(gpointer) node);
}
/*!
* \internal
* \brief Update resource role etc. after a successful migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_success(struct action_history *history)
{
/* A complete migration sequence is:
* 1. migrate_to on source node (which succeeded if we get to this function)
* 2. migrate_from on target node
* 3. stop on source node
*
* If no migrate_from has happened, the migration is considered to be
* "partial". If the migrate_from succeeded but no stop has happened, the
* migration is considered to be "dangling".
*
* If a successful migrate_to and stop have happened on the source node, we
* still need to check for a partial migration, due to scenarios (easier to
* produce with batch-limit=1) like:
*
* - A resource is migrating from node1 to node2, and a migrate_to is
* initiated for it on node1.
*
* - node2 goes into standby mode while the migrate_to is pending, which
* aborts the transition.
*
* - Upon completion of the migrate_to, a new transition schedules a stop
* on both nodes and a start on node1.
*
* - If the new transition is aborted for any reason while the resource is
* stopping on node1, the transition after that stop completes will see
* the migrate_to and stop on the source, but it's still a partial
* migration, and the resource must be stopped on node2 because it is
* potentially active there due to the migrate_to.
*
* We also need to take into account that either node's history may be
* cleared at any point in the migration process.
*/
int from_rc = PCMK_OCF_OK;
int from_status = PCMK_EXEC_PENDING;
pcmk_node_t *target_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
bool source_newer_op = false;
bool target_newer_state = false;
bool active_on_target = false;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
// Check for newer state on the source
source_newer_op = non_monitor_after(history->rsc->id, source, history->xml,
true, history->rsc->cluster);
// Check for a migrate_from action from this source on the target
migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
target, source, -1, history->rsc->cluster);
if (migrate_from != NULL) {
if (source_newer_op) {
/* There's a newer non-monitor operation on the source and a
* migrate_from on the target, so this migrate_to is irrelevant to
* the resource's state.
*/
return;
}
crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc);
crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status);
}
/* If the resource has newer state on both the source and target after the
* migration events, this migrate_to is irrelevant to the resource's state.
*/
target_newer_state = newer_state_after_migrate(history->rsc->id, target,
history->xml, migrate_from,
history->rsc->cluster);
if (source_newer_op && target_newer_state) {
return;
}
/* Check for dangling migration (migrate_from succeeded but stop not done).
* We know there's no stop because we already returned if the target has a
* migrate_from and the source has any newer non-monitor operation.
*/
if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) {
add_dangling_migration(history->rsc, history->node);
return;
}
/* Without newer state, this migrate_to implies the resource is active.
* (Clones are not allowed to migrate, so role can't be promoted.)
*/
history->rsc->role = pcmk_role_started;
target_node = pcmk_find_node(history->rsc->cluster, target);
active_on_target = !target_newer_state && (target_node != NULL)
&& target_node->details->online;
if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
if (active_on_target) {
native_add_running(history->rsc, target_node, history->rsc->cluster,
TRUE);
} else {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable);
}
return;
}
// The migrate_from is pending, complete but erased, or to be scheduled
/* If there is no history at all for the resource on an online target, then
* it was likely cleaned. Just return, and we'll schedule a probe. Once we
* have the probe result, it will be reflected in target_newer_state.
*/
if ((target_node != NULL) && target_node->details->online
&& unknown_on_node(history->rsc, target)) {
return;
}
if (active_on_target) {
pcmk_node_t *source_node = pcmk_find_node(history->rsc->cluster,
source);
native_add_running(history->rsc, target_node, history->rsc->cluster,
FALSE);
if ((source_node != NULL) && source_node->details->online) {
/* This is a partial migration: the migrate_to completed
* successfully on the source, but the migrate_from has not
* completed. Remember the source and target; if the newly
* chosen target remains the same when we schedule actions
* later, we may continue with the migration.
*/
history->rsc->partial_migration_target = target_node;
history->rsc->partial_migration_source = source_node;
}
} else if (!source_newer_op) {
// Mark resource as failed, require recovery, and prevent migration
pcmk__set_rsc_flags(history->rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable);
}
}
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_to action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_to_failure(struct action_history *history)
{
xmlNode *target_migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, history->node, NULL, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->role = pcmk_role_started;
// Check for migrate_from on the target
target_migrate_from = find_lrm_op(history->rsc->id,
PCMK_ACTION_MIGRATE_FROM, target, source,
PCMK_OCF_OK, history->rsc->cluster);
if (/* If the resource state is unknown on the target, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(history->rsc, target)
/* If the resource has newer state on the target after the migration
* events, this migrate_to no longer matters for the target.
*/
&& !newer_state_after_migrate(history->rsc->id, target, history->xml,
target_migrate_from,
history->rsc->cluster)) {
/* The resource has no newer state on the target, so assume it's still
* active there (if it is up).
*/
pcmk_node_t *target_node = pcmk_find_node(history->rsc->cluster,
target);
if (target_node && target_node->details->online) {
native_add_running(history->rsc, target_node, history->rsc->cluster,
FALSE);
}
} else if (!non_monitor_after(history->rsc->id, source, history->xml, true,
history->rsc->cluster)) {
/* We know the resource has newer state on the target, but this
* migrate_to still matters for the source as long as there's no newer
* non-monitor operation there.
*/
// Mark node as having dangling migration so we can force a stop later
history->rsc->dangling_migrations =
g_list_prepend(history->rsc->dangling_migrations,
(gpointer) history->node);
}
}
/*!
* \internal
* \brief Update resource role etc. after a failed migrate_from action
*
* \param[in,out] history Parsed action result history
*/
static void
unpack_migrate_from_failure(struct action_history *history)
{
xmlNode *source_migrate_to = NULL;
const char *source = NULL;
const char *target = NULL;
// Get source and target node names from XML
if (get_migration_node_names(history->xml, NULL, history->node, &source,
&target) != pcmk_rc_ok) {
return;
}
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
history->rsc->role = pcmk_role_started;
// Check for a migrate_to on the source
source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
source, target, PCMK_OCF_OK,
history->rsc->cluster);
if (/* If the resource state is unknown on the source, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(history->rsc, source)
/* If the resource has newer state on the source after the migration
* events, this migrate_from no longer matters for the source.
*/
&& !newer_state_after_migrate(history->rsc->id, source,
source_migrate_to, history->xml,
history->rsc->cluster)) {
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
pcmk_node_t *source_node = pcmk_find_node(history->rsc->cluster,
source);
if (source_node && source_node->details->online) {
native_add_running(history->rsc, source_node, history->rsc->cluster,
TRUE);
}
}
}
/*!
* \internal
* \brief Add an action to cluster's list of failed actions
*
* \param[in,out] history Parsed action result history
*/
static void
record_failed_op(struct action_history *history)
{
if (!(history->node->details->online)) {
return;
}
for (const xmlNode *xIter = history->rsc->cluster->failed->children;
xIter != NULL; xIter = xIter->next) {
const char *key = pcmk__xe_history_key(xIter);
const char *uname = crm_element_value(xIter, PCMK_XA_UNAME);
if (pcmk__str_eq(history->key, key, pcmk__str_none)
&& pcmk__str_eq(uname, history->node->details->uname,
pcmk__str_casei)) {
crm_trace("Skipping duplicate entry %s on %s",
history->key, pcmk__node_name(history->node));
return;
}
}
crm_trace("Adding entry for %s on %s to failed action list",
history->key, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname);
crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id);
pcmk__xml_copy(history->rsc->cluster->failed, history->xml);
}
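/*!
* \internal
* \brief Build a displayable timestamp for an action's last result change
*
* \param[in] xml_op  Action history XML
*
* \return Newly allocated string with the \c PCMK_XA_LAST_RC_CHANGE time (with
*         the day of week dropped for brevity), or "unknown_time" if unset
*/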
static char *
last_change_str(const xmlNode *xml_op)
{
time_t when;
char *result = NULL;
if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE,
&when) == pcmk_ok) {
char *when_s = pcmk__epoch2str(&when, 0);
const char *p = strchr(when_s, ' ');
// Skip day of week to make message shorter
if ((p != NULL) && (*(++p) != '\0')) {
result = pcmk__str_copy(p);
}
free(when_s);
}
if (result == NULL) {
result = pcmk__str_copy("unknown_time");
}
return result;
}
/*!
* \internal
* \brief Compare two on-fail values
*
* \param[in] first One on-fail value to compare
* \param[in] second The other on-fail value to compare
*
* \return A negative number if second is more severe than first, zero if they
* are equal, or a positive number if first is more severe than second.
* \note This is only needed until the action_fail_response values can be
* renumbered at the next API compatibility break.
*/
static int
cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
{
switch (first) {
case pcmk_on_fail_demote:
switch (second) {
case pcmk_on_fail_ignore:
return 1;
case pcmk_on_fail_demote:
return 0;
default:
return -1;
}
break;
case pcmk_on_fail_reset_remote:
switch (second) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
return 1;
case pcmk_on_fail_reset_remote:
return 0;
default:
return -1;
}
break;
case pcmk_on_fail_restart_container:
switch (second) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_reset_remote:
return 1;
case pcmk_on_fail_restart_container:
return 0;
default:
return -1;
}
break;
default:
break;
}
switch (second) {
case pcmk_on_fail_demote:
return (first == pcmk_on_fail_ignore)? -1 : 1;
case pcmk_on_fail_reset_remote:
switch (first) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
return -1;
default:
return 1;
}
break;
case pcmk_on_fail_restart_container:
switch (first) {
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_reset_remote:
return -1;
default:
return 1;
}
break;
default:
break;
}
return first - second;
}
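/* For example (per the explicit cases above), cmp_on_fail() treats
* pcmk_on_fail_reset_remote as more severe than pcmk_on_fail_restart, and
* pcmk_on_fail_restart_container as more severe than pcmk_on_fail_reset_remote.
* Pairs without an explicit case fall back to comparing the enum values.
*/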
/*!
* \internal
* \brief Ban a resource (or its clone if an anonymous instance) from all nodes
*
* \param[in,out] rsc Resource to ban
*/
static void
ban_from_all_nodes(pcmk_resource_t *rsc)
{
int score = -PCMK_SCORE_INFINITY;
pcmk_resource_t *fail_rsc = rsc;
if (fail_rsc->parent != NULL) {
pcmk_resource_t *parent = uber_parent(fail_rsc);
if (pcmk__is_anonymous_clone(parent)) {
/* For anonymous clones, if an operation with
* PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the
* entire clone must stop.
*/
fail_rsc = parent;
}
}
// Ban the resource from all nodes
crm_notice("%s will not be started under current conditions", fail_rsc->id);
if (fail_rsc->allowed_nodes != NULL) {
g_hash_table_destroy(fail_rsc->allowed_nodes);
}
fail_rsc->allowed_nodes = pe__node_list2table(rsc->cluster->nodes);
g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
}
/*!
* \internal
* \brief Get configured failure handling and role after failure for an action
*
* \param[in,out] history Unpacked action history entry
* \param[out] on_fail Where to set configured failure handling
* \param[out] fail_role Where to set to role after failure
*/
static void
unpack_failure_handling(struct action_history *history,
enum action_fail_response *on_fail,
enum rsc_role_e *fail_role)
{
xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
history->interval_ms, true);
GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
history->task,
history->interval_ms, config);
const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
*on_fail = pcmk__parse_on_fail(history->rsc, history->task,
history->interval_ms, on_fail_str);
*fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
meta);
g_hash_table_destroy(meta);
}
/*!
* \internal
* \brief Update resource role, failure handling, etc., after a failed action
*
* \param[in,out] history Parsed action result history
* \param[in] config_on_fail Action failure handling from configuration
* \param[in] fail_role Resource's role after failure of this action
* \param[out] last_failure This will be set to the history XML
* \param[in,out] on_fail Actual handling of action result
*/
static void
unpack_rsc_op_failure(struct action_history *history,
enum action_fail_response config_on_fail,
enum rsc_role_e fail_role, xmlNode **last_failure,
enum action_fail_response *on_fail)
{
bool is_probe = false;
char *last_change_s = NULL;
*last_failure = history->xml;
is_probe = pcmk_xe_is_probe(history->xml);
last_change_s = last_change_str(history->xml);
if (!pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_symmetric_cluster)
&& (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
} else {
pcmk__sched_warn("Unexpected result (%s%s%s) was recorded for %s of "
"%s on %s at %s " CRM_XS " exit-status=%d id=%s",
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
(is_probe? "probe" : history->task), history->rsc->id,
pcmk__node_name(history->node), last_change_s,
history->exit_status, history->id);
if (is_probe && (history->exit_status != PCMK_OCF_OK)
&& (history->exit_status != PCMK_OCF_NOT_RUNNING)
&& (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) {
/* A failed (not just unexpected) probe result could mean the user
* didn't know resources will be probed even where they can't run.
*/
crm_notice("If it is not possible for %s to run on %s, see "
"the " PCMK_XA_RESOURCE_DISCOVERY " option for location "
"constraints",
history->rsc->id, pcmk__node_name(history->node));
}
record_failed_op(history);
}
free(last_change_s);
if (cmp_on_fail(*on_fail, config_on_fail) < 0) {
pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s",
pcmk_on_fail_text(*on_fail),
pcmk_on_fail_text(config_on_fail), history->key);
*on_fail = config_on_fail;
}
if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY,
"__stop_fail__", history->rsc->cluster);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
unpack_migrate_to_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
unpack_migrate_from_failure(history);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->role = pcmk_role_promoted;
} else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
if (config_on_fail == pcmk_on_fail_block) {
history->rsc->role = pcmk_role_promoted;
pe__set_next_role(history->rsc, pcmk_role_stopped,
"demote with " PCMK_META_ON_FAIL "=block");
} else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
history->rsc->role = pcmk_role_stopped;
} else {
/* Staying in the promoted role would put the scheduler and
* controller into a loop. Setting the role to unpromoted is not
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
history->rsc->role = pcmk_role_unpromoted;
}
}
if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
/* leave stopped */
pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
history->rsc->role = pcmk_role_stopped;
} else if (history->rsc->role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
set_active(history->rsc);
}
pcmk__rsc_trace(history->rsc,
"Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s",
history->rsc->id, pcmk_role_text(history->rsc->role),
pcmk__btoa(history->node->details->unclean),
pcmk_on_fail_text(config_on_fail),
pcmk_role_text(fail_role));
if ((fail_role != pcmk_role_started)
&& (history->rsc->next_role < fail_role)) {
pe__set_next_role(history->rsc, fail_role, "failure");
}
if (fail_role == pcmk_role_stopped) {
ban_from_all_nodes(history->rsc);
}
}
/*!
* \internal
* \brief Block a resource with a failed action if it cannot be recovered
*
* If resource action is a failed stop and fencing is not possible, mark the
* resource as unmanaged and blocked, since recovery cannot be done.
*
* \param[in,out] history Parsed action history entry
*/
static void
block_if_unrecoverable(struct action_history *history)
{
char *last_change_s = NULL;
if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(history->node->details->data_set, history->node)) {
return; // Failed stops are recoverable via fencing
}
last_change_s = last_change_str(history->xml);
pcmk__sched_err("No further recovery can be attempted for %s "
"because %s on %s failed (%s%s%s) at %s "
CRM_XS " rc=%d id=%s",
history->rsc->id, history->task,
pcmk__node_name(history->node),
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""),
last_change_s, history->exit_status, history->id);
free(last_change_s);
pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_managed);
pcmk__set_rsc_flags(history->rsc, pcmk_rsc_blocked);
}
/*!
* \internal
* \brief Update action history's execution status and why
*
* \param[in,out] history Parsed action history entry
* \param[out] why Where to store reason for update
* \param[in] value New value
* \param[in] reason Description of why value was changed
*/
static inline void
remap_because(struct action_history *history, const char **why, int value,
const char *reason)
{
if (history->execution_status != value) {
history->execution_status = value;
*why = reason;
}
}
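/* For example, remap_because(history, &why, PCMK_EXEC_DONE, "expected result")
* sets the entry's execution status to "done" and, if that is a change,
* records the reason so the remapping can be trace-logged later.
*/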
/*!
* \internal
* \brief Remap informational monitor results and operation status
*
* For monitor results, certain OCF codes exist to provide extended information
* to the user about services that aren't failed yet but aren't entirely healthy either.
* These must be treated as the "normal" result by Pacemaker.
*
* For operation status, the action result can be used to determine an appropriate
* status for the purposes of responding to the action. The status provided by the
* executor is not directly usable since the executor does not know what was expected.
*
* \param[in,out] history Parsed action history entry
* \param[in,out] on_fail What should be done about the result
* \param[in] expired Whether result is expired
*
* \note If the result is remapped and the node is not shutting down or failed,
* the operation will be recorded in the scheduler data's list of failed
* operations to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
static void
remap_operation(struct action_history *history,
enum action_fail_response *on_fail, bool expired)
{
bool is_probe = false;
int orig_exit_status = history->exit_status;
int orig_exec_status = history->execution_status;
const char *why = NULL;
const char *task = history->task;
// Remap degraded results to their successful counterparts
history->exit_status = pcmk__effective_rc(history->exit_status);
if (history->exit_status != orig_exit_status) {
why = "degraded result";
if (!expired && (!history->node->details->shutdown
|| history->node->details->online)) {
record_failed_op(history);
}
}
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& ((history->execution_status != PCMK_EXEC_DONE)
|| (history->exit_status != PCMK_OCF_NOT_RUNNING))) {
history->execution_status = PCMK_EXEC_DONE;
history->exit_status = PCMK_OCF_NOT_RUNNING;
why = "equivalent probe result";
}
/* If the executor reported an execution status of anything but done or
* error, consider that final. But for done or error, we know better whether
* it should be treated as a failure or not, because we know the expected
* result.
*/
switch (history->execution_status) {
case PCMK_EXEC_DONE:
case PCMK_EXEC_ERROR:
break;
// These should be treated as node-fatal
case PCMK_EXEC_NO_FENCE_DEVICE:
case PCMK_EXEC_NO_SECRETS:
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"node-fatal error");
goto remap_done;
default:
goto remap_done;
}
is_probe = pcmk_xe_is_probe(history->xml);
if (is_probe) {
task = "probe";
}
if (history->expected_exit_status < 0) {
/* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
* Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
* expected exit status in the transition key, which (along with the
* similar case of a corrupted transition key in the CIB) will be
* reported to this function as -1. Pacemaker 2.0+ does not support
* rolling upgrades from those versions or processing of saved CIB files
* from those versions, so we do not need to care much about this case.
*/
remap_because(history, &why, PCMK_EXEC_ERROR,
"obsolete history format");
pcmk__config_warn("Expected result not found for %s on %s "
"(corrupt or obsolete CIB?)",
history->key, pcmk__node_name(history->node));
} else if (history->exit_status == history->expected_exit_status) {
remap_because(history, &why, PCMK_EXEC_DONE, "expected result");
} else {
remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result");
pcmk__rsc_debug(history->rsc,
"%s on %s: expected %d (%s), got %d (%s%s%s)",
history->key, pcmk__node_name(history->node),
history->expected_exit_status,
services_ocf_exitcode_str(history->expected_exit_status),
history->exit_status,
services_ocf_exitcode_str(history->exit_status),
(pcmk__str_empty(history->exit_reason)? "" : ": "),
pcmk__s(history->exit_reason, ""));
}
switch (history->exit_status) {
case PCMK_OCF_OK:
if (is_probe
&& (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active on %s at %s",
history->rsc->id, pcmk__node_name(history->node),
last_change_s);
free(last_change_s);
}
break;
case PCMK_OCF_NOT_RUNNING:
if (is_probe
|| (history->expected_exit_status == history->exit_status)
|| !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) {
/* For probes, recurring monitors for the Stopped role, and
* unmanaged resources, "not running" is not considered a
* failure.
*/
remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
history->rsc->role = pcmk_role_stopped;
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"not running");
}
break;
case PCMK_OCF_RUNNING_PROMOTED:
if (is_probe
&& (history->exit_status != history->expected_exit_status)) {
char *last_change_s = last_change_str(history->xml);
remap_because(history, &why, PCMK_EXEC_DONE, "probe");
pcmk__rsc_info(history->rsc,
"Probe found %s active and promoted on %s at %s",
history->rsc->id,
pcmk__node_name(history->node), last_change_s);
free(last_change_s);
}
if (!expired
|| (history->exit_status == history->expected_exit_status)) {
history->rsc->role = pcmk_role_promoted;
}
break;
case PCMK_OCF_FAILED_PROMOTED:
if (!expired) {
history->rsc->role = pcmk_role_promoted;
}
remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
break;
case PCMK_OCF_NOT_CONFIGURED:
remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status");
break;
case PCMK_OCF_UNIMPLEMENT_FEATURE:
{
guint interval_ms = 0;
crm_element_value_ms(history->xml, PCMK_META_INTERVAL,
&interval_ms);
if (interval_ms == 0) {
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
"exit status");
} else {
remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED,
"exit status");
}
}
break;
case PCMK_OCF_NOT_INSTALLED:
case PCMK_OCF_INVALID_PARAM:
case PCMK_OCF_INSUFFICIENT_PRIV:
if (!expired) {
block_if_unrecoverable(history);
}
remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status");
break;
default:
if (history->execution_status == PCMK_EXEC_DONE) {
char *last_change_s = last_change_str(history->xml);
crm_info("Treating unknown exit status %d from %s of %s "
"on %s at %s as failure",
history->exit_status, task, history->rsc->id,
pcmk__node_name(history->node), last_change_s);
remap_because(history, &why, PCMK_EXEC_ERROR,
"unknown exit status");
free(last_change_s);
}
break;
}
remap_done:
if (why != NULL) {
pcmk__rsc_trace(history->rsc,
"Remapped %s result from [%s: %s] to [%s: %s] "
"because of %s",
history->key, pcmk_exec_status_str(orig_exec_status),
crm_exit_str(orig_exit_status),
pcmk_exec_status_str(history->execution_status),
crm_exit_str(history->exit_status), why);
}
}
// Return TRUE if this is the last failure of a start or monitor action
// and the resource's parameters have changed since then
static bool
should_clear_for_param_change(const xmlNode *xml_op, const char *task,
pcmk_resource_t *rsc, pcmk_node_t *node)
{
if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
rsc->cluster);
} else {
pcmk__op_digest_t *digest_data = NULL;
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->cluster);
switch (digest_data->rc) {
case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, pcmk__xe_history_key(xml_op),
node->details->id);
break;
case pcmk__digest_match:
break;
default:
return TRUE;
}
}
}
return FALSE;
}
// Order action after fencing of remote node, given connection rsc
static void
order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
pcmk_scheduler_t *scheduler)
{
pcmk_node_t *remote_node = pcmk_find_node(scheduler, remote_conn->id);
if (remote_node) {
pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
FALSE, scheduler);
order_actions(fence, action, pcmk__ar_first_implies_then);
}
}
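/*!
* \internal
* \brief Check whether a failure should be allowed to expire
*
* \param[in] rsc              Resource that the failure belongs to
* \param[in] task             Name of the failed action
* \param[in] interval_ms      Interval of the failed action
* \param[in] is_last_failure  Whether this is a last_failure entry
*
* \return TRUE if the failure timeout should be ignored (currently only when
*         fencing is enabled, for recurring monitors of a remote connection
*         resource with a reconnect interval whose remote node has not yet
*         been fenced), otherwise FALSE
*/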
static bool
should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
guint interval_ms, bool is_last_failure)
{
/* Clearing failures of recurring monitors has special concerns. The
* executor reports only changes in the monitor result, so if the
* monitor is still active and still getting the same failure result,
* that will go undetected after the failure is cleared.
*
* Also, the operation history will have the time when the recurring
* monitor result changed to the given code, not the time when the
* result last happened.
*
* @TODO We probably should clear such failures only when the failure
* timeout has passed since the last occurrence of the failed result.
* However we don't record that information. We could maybe approximate
* that by clearing only if there is a more recent successful monitor or
* stop result, but we don't even have that information at this point
* since we are still unpacking the resource's operation history.
*
* This is especially important for remote connection resources with a
* reconnect interval, so in that case, we skip clearing failures
* if the remote node hasn't been fenced.
*/
if (rsc->remote_reconnect_ms
&& pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)
&& (interval_ms != 0)
&& pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pcmk_node_t *remote_node = pcmk_find_node(rsc->cluster, rsc->id);
if (remote_node && !remote_node->details->remote_was_fenced) {
if (is_last_failure) {
crm_info("Waiting to clear monitor failure for remote node %s"
" until fencing has occurred", rsc->id);
}
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Check operation age and schedule failure clearing when appropriate
*
* This function has two distinct purposes. The first is to check whether an
* operation history entry is expired (i.e. the resource has a failure timeout,
* the entry is older than the timeout, and the resource either has no fail
* count or its fail count is entirely older than the timeout). The second is to
* schedule fail count clearing when appropriate (i.e. the operation is expired
* and either the resource has an expired fail count or the operation is a
* last_failure for a remote connection resource with a reconnect interval,
* or the operation is a last_failure for a start or monitor operation and the
* resource's parameters have changed since the operation).
*
* \param[in,out] history Parsed action result history
*
* \return true if operation history entry is expired, otherwise false
*/
static bool
check_operation_expiry(struct action_history *history)
{
bool expired = false;
bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0");
time_t last_run = 0;
int unexpired_fail_count = 0;
const char *clear_reason = NULL;
if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) {
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not expired: "
"Not Installed does not expire",
history->id, pcmk__node_name(history->node));
return false; // "Not installed" must always be cleared manually
}
if ((history->rsc->failure_timeout > 0)
&& (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE,
&last_run) == 0)) {
/* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a
* timestamp
*/
time_t now = get_effective_time(history->rsc->cluster);
time_t last_failure = 0;
// Is this particular operation history older than the failure timeout?
if ((now >= (last_run + history->rsc->failure_timeout))
&& !should_ignore_failure_timeout(history->rsc, history->task,
history->interval_ms,
is_last_failure)) {
expired = true;
}
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
&last_failure,
pcmk__fc_effective,
history->xml);
// Update scheduler recheck time according to *last* failure
crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
" last-failure@%lld",
history->id, (long long) last_run, (expired? "" : "not "),
(long long) now, unexpired_fail_count,
history->rsc->failure_timeout, (long long) last_failure);
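        // If the fail count will expire later, schedule a recheck for just after that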
last_failure += history->rsc->failure_timeout + 1;
if (unexpired_fail_count && (now < last_failure)) {
pe__update_recheck_time(last_failure, history->rsc->cluster,
"fail count expiration");
}
}
if (expired) {
if (pe_get_failcount(history->node, history->rsc, NULL,
pcmk__fc_default, history->xml)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
// There is no fail count considering timeout
clear_reason = "it expired";
} else {
/* This operation is old, but there is an unexpired fail count.
* In a properly functioning cluster, this should only be
* possible if this operation is not a failure (otherwise the
* fail count should be expired too), so this is really just a
* failsafe.
*/
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Unexpired fail count",
history->id, pcmk__node_name(history->node));
expired = false;
}
} else if (is_last_failure
&& (history->rsc->remote_reconnect_ms != 0)) {
/* Clear any expired last failure when reconnect interval is set,
* even if there is no fail count.
*/
clear_reason = "reconnect interval is set";
}
}
if (!expired && is_last_failure
&& should_clear_for_param_change(history->xml, history->task,
history->rsc, history->node)) {
clear_reason = "resource parameters have changed";
}
if (clear_reason != NULL) {
pcmk_action_t *clear_op = NULL;
// Schedule clearing of the fail count
clear_op = pe__clear_failcount(history->rsc, history->node,
clear_reason, history->rsc->cluster);
if (pcmk_is_set(history->rsc->cluster->flags,
pcmk_sched_fencing_enabled)
&& (history->rsc->remote_reconnect_ms != 0)) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
* completes.
*
* We could limit this to remote_node->details->unclean, but at
* this point, that's always true (it won't be reliable until
* after unpack_node_history() is done).
*/
crm_info("Clearing %s failure will wait until any scheduled "
"fencing of %s completes",
history->task, history->rsc->id);
order_after_remote_fencing(clear_op, history->rsc,
history->rsc->cluster);
}
}
if (expired && (history->interval_ms == 0)
&& pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
switch (history->exit_status) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
case PCMK_OCF_RUNNING_PROMOTED:
case PCMK_OCF_DEGRADED:
case PCMK_OCF_DEGRADED_PROMOTED:
// Don't expire probes that return these values
pcmk__rsc_trace(history->rsc,
"Resource history entry %s on %s is not "
"expired: Probe result",
history->id, pcmk__node_name(history->node));
expired = false;
break;
}
}
return expired;
}
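/*!
 * \internal
 * \brief Get the expected result of an action from its history entry
 *
 * \param[in] xml_op  Action history entry XML
 *
 * \return Expected exit status decoded from the entry's transition key, or -1
 *         if the entry has no transition key
 */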
int
pe__target_rc_from_xml(const xmlNode *xml_op)
{
int target_rc = 0;
const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY);
if (key == NULL) {
return -1;
}
decode_transition_key(key, NULL, NULL, NULL, &target_rc);
return target_rc;
}
/*!
* \internal
* \brief Update a resource's state for an action result
*
* \param[in,out] history Parsed action history entry
* \param[in] exit_status Exit status to base new state on
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
update_resource_state(struct action_history *history, int exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
bool clear_past_failure = false;
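    // A "not installed" result or a masked probe failure means the resource is stopped here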
if ((exit_status == PCMK_OCF_NOT_INSTALLED)
|| (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml))) {
history->rsc->role = pcmk_role_stopped;
} else if (exit_status == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
if ((last_failure != NULL)
&& pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure),
pcmk__str_none)) {
clear_past_failure = true;
}
if (history->rsc->role < pcmk_role_started) {
set_active(history->rsc);
}
} else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
history->rsc->role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
history->rsc->role = pcmk_role_stopped;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
history->rsc->role = pcmk_role_promoted;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
pcmk__str_none)) {
if (*on_fail == pcmk_on_fail_demote) {
/* Demote clears an error only if
* PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE
*/
clear_past_failure = true;
}
history->rsc->role = pcmk_role_unpromoted;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
history->rsc->role = pcmk_role_started;
clear_past_failure = true;
} else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
unpack_migrate_to_success(history);
} else if (history->rsc->role < pcmk_role_started) {
pcmk__rsc_trace(history->rsc, "%s active on %s",
history->rsc->id, pcmk__node_name(history->node));
set_active(history->rsc);
}
if (!clear_past_failure) {
return;
}
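    /* Decide whether this completed action also resets the failure handling
     * (*on_fail) currently in effect for the resource
     */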
switch (*on_fail) {
case pcmk_on_fail_stop:
case pcmk_on_fail_ban:
case pcmk_on_fail_standby_node:
case pcmk_on_fail_fence_node:
pcmk__rsc_trace(history->rsc,
"%s (%s) is not cleared by a completed %s",
history->rsc->id, pcmk_on_fail_text(*on_fail),
history->task);
break;
case pcmk_on_fail_block:
case pcmk_on_fail_ignore:
case pcmk_on_fail_demote:
case pcmk_on_fail_restart:
case pcmk_on_fail_restart_container:
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures");
break;
case pcmk_on_fail_reset_remote:
if (history->rsc->remote_reconnect_ms == 0) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
* completely stopped. (With a reconnect interval, we wait
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
*on_fail = pcmk_on_fail_ignore;
pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures and reset remote");
}
break;
}
}
/*!
* \internal
* \brief Check whether a given history entry matters for resource state
*
* \param[in] history Parsed action history entry
*
* \return true if action can affect resource state, otherwise false
*/
static inline bool
can_affect_state(struct action_history *history)
{
#if 0
/* @COMPAT It might be better to parse only actions we know we're interested
* in, rather than exclude a couple we don't. However that would be a
* behavioral change that should be done at a major or minor series release.
* Currently, unknown operations can affect whether a resource is considered
* active and/or failed.
*/
return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START, PCMK_ACTION_STOP,
PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
"asyncmon", NULL);
#else
return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY,
PCMK_ACTION_META_DATA, NULL);
#endif
}
/*!
* \internal
* \brief Unpack execution/exit status and exit reason from a history entry
*
* \param[in,out] history Action history entry to unpack
*
* \return Standard Pacemaker return code
*/
static int
unpack_action_result(struct action_history *history)
{
if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS,
&(history->execution_status)) < 0)
|| (history->execution_status < PCMK_EXEC_PENDING)
|| (history->execution_status > PCMK_EXEC_MAX)
|| (history->execution_status == PCMK_EXEC_CANCELLED)) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_OP_STATUS " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_OP_STATUS),
""));
return pcmk_rc_unpack_error;
}
if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE,
&(history->exit_status)) < 0)
|| (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) {
#if 0
/* @COMPAT We should ignore malformed entries, but since that would
* change behavior, it should be done at a major or minor series
* release.
*/
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"with invalid " PCMK__XA_RC_CODE " '%s'",
history->id, history->rsc->id,
pcmk__node_name(history->node),
pcmk__s(crm_element_value(history->xml,
PCMK__XA_RC_CODE),
""));
return pcmk_rc_unpack_error;
#else
history->exit_status = CRM_EX_ERROR;
#endif
}
history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Process an action history entry whose result expired
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
*
* \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the
* entry needs no further processing)
*/
static int
process_expired_result(struct action_history *history, int orig_exit_status)
{
if (!pcmk__is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml)
&& (orig_exit_status != history->expected_exit_status)) {
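        /* The expired masked probe failure no longer tells us anything about
         * where the resource might be active
         */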
if (history->rsc->role <= pcmk_role_stopped) {
history->rsc->role = pcmk_role_unknown;
}
crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
"Masked failure expired",
history->id, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->exit_status == history->expected_exit_status) {
return pcmk_rc_undetermined; // Only failures expire
}
if (history->interval_ms == 0) {
crm_notice("Ignoring resource history entry %s for %s of %s on %s: "
"Expired failure",
history->id, history->task, history->rsc->id,
pcmk__node_name(history->node));
return pcmk_rc_ok;
}
if (history->node->details->online && !history->node->details->unclean) {
/* Reschedule the recurring action. schedule_cancel() won't work at
* this stage, so as a hacky workaround, forcibly change the restart
* digest so pcmk__check_action_config() does what we want later.
*
* @TODO We should skip this if there is a newer successful monitor.
* Also, this causes rescheduling only if the history entry
* has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure
* scheduler regression test doesn't, but that may not be a
* realistic scenario in production).
*/
crm_notice("Rescheduling %s-interval %s of %s on %s "
"after failure expired",
pcmk__readable_interval(history->interval_ms), history->task,
history->rsc->id, pcmk__node_name(history->node));
crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST,
"calculated-failure-timeout");
return pcmk_rc_ok;
}
return pcmk_rc_undetermined;
}
/*!
* \internal
* \brief Process a masked probe failure
*
* \param[in,out] history Parsed action history entry
* \param[in] orig_exit_status Action exit status before remapping
* \param[in] last_failure Resource's last_failure entry, if known
* \param[in,out] on_fail Resource's current failure handling
*/
static void
mask_probe_failure(struct action_history *history, int orig_exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
pcmk_resource_t *ban_rsc = history->rsc;
if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) {
ban_rsc = uber_parent(history->rsc);
}
crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
services_ocf_exitcode_str(orig_exit_status), history->rsc->id,
pcmk__node_name(history->node));
update_resource_state(history, history->expected_exit_status, last_failure,
on_fail);
crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname);
record_failed_op(history);
resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY,
"masked-probe-failure", history->rsc->cluster);
}
/*!
 * \internal
 * \brief Check whether a given failure is for a given pending action
*
* \param[in] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*
* \return true if \p last_failure is failure of pending action in \p history,
* otherwise false
* \note Both \p history and \p last_failure must come from the same
* \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be
* the same.
*/
static bool
failure_is_newer(const struct action_history *history,
const xmlNode *last_failure)
{
guint failure_interval_ms = 0U;
long long failure_change = 0LL;
long long this_change = 0LL;
if (last_failure == NULL) {
return false; // Resource has no last_failure entry
}
if (!pcmk__str_eq(history->task,
crm_element_value(last_failure, PCMK_XA_OPERATION),
pcmk__str_none)) {
return false; // last_failure is for different action
}
if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL,
&failure_interval_ms) != pcmk_ok)
|| (history->interval_ms != failure_interval_ms)) {
return false; // last_failure is for action with different interval
}
if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE),
&this_change, 0LL) != pcmk_rc_ok)
|| (pcmk__scan_ll(crm_element_value(last_failure,
PCMK_XA_LAST_RC_CHANGE),
&failure_change, 0LL) != pcmk_rc_ok)
|| (failure_change < this_change)) {
return false; // Failure is not known to be newer
}
return true;
}
/*!
* \internal
* \brief Update a resource's role etc. for a pending action
*
* \param[in,out] history Parsed history entry for pending action
* \param[in] last_failure Resource's last_failure entry, if known
*/
static void
process_pending_action(struct action_history *history,
const xmlNode *last_failure)
{
/* For recurring monitors, a failure is recorded only in RSC_last_failure_0,
* and there might be a RSC_monitor_INTERVAL entry with the last successful
* or pending result.
*
* If last_failure contains the failure of the pending recurring monitor
* we're processing here, and is newer, the action is no longer pending.
* (Pending results have call ID -1, which sorts last, so the last failure
* if any should be known.)
*/
if (failure_is_newer(history, last_failure)) {
return;
}
if (strcmp(history->task, PCMK_ACTION_START) == 0) {
pcmk__set_rsc_flags(history->rsc, pcmk_rsc_start_pending);
set_active(history->rsc);
} else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
history->rsc->role = pcmk_role_promoted;
} else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
&& history->node->details->unclean) {
        /* A migrate_to action is pending on an unclean source, so force a stop
* on the target.
*/
const char *migrate_target = NULL;
pcmk_node_t *target = NULL;
migrate_target = crm_element_value(history->xml,
PCMK__META_MIGRATE_TARGET);
target = pcmk_find_node(history->rsc->cluster, migrate_target);
if (target != NULL) {
stop_action(history->rsc, target, FALSE);
}
}
if (history->rsc->pending_task != NULL) {
/* There should never be multiple pending actions, but as a failsafe,
* just remember the first one processed for display purposes.
*/
return;
}
if (pcmk_is_probe(history->task, history->interval_ms)) {
/* Pending probes are currently never displayed, even if pending
* operations are requested. If we ever want to change that,
* enable the below and the corresponding part of
* native.c:native_pending_task().
*/
#if 0
history->rsc->pending_task = strdup("probe");
history->rsc->pending_node = history->node;
#endif
} else {
history->rsc->pending_task = strdup(history->task);
history->rsc->pending_node = history->node;
}
}
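/*!
 * \internal
 * \brief Unpack a resource operation history entry and update resource state
 *
 * \param[in,out] rsc           Resource that the history entry is for
 * \param[in,out] node          Node that the history entry is for
 * \param[in,out] xml_op        Operation history entry XML to unpack
 * \param[in,out] last_failure  Resource's last failure entry, if known
 * \param[in,out] on_fail       Resource's current failure handling
 */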
static void
unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum action_fail_response *on_fail)
{
int old_rc = 0;
bool expired = false;
pcmk_resource_t *parent = rsc;
enum rsc_role_e fail_role = pcmk_role_unknown;
enum action_fail_response failure_strategy = pcmk_on_fail_restart;
struct action_history history = {
.rsc = rsc,
.node = node,
.xml = xml_op,
.execution_status = PCMK_EXEC_UNKNOWN,
};
CRM_CHECK(rsc && node && xml_op, return);
history.id = pcmk__xe_id(xml_op);
if (history.id == NULL) {
pcmk__config_err("Ignoring resource history entry for %s on %s "
"without ID", rsc->id, pcmk__node_name(node));
return;
}
// Task and interval
history.task = crm_element_value(xml_op, PCMK_XA_OPERATION);
if (history.task == NULL) {
pcmk__config_err("Ignoring resource history entry %s for %s on %s "
"without " PCMK_XA_OPERATION,
history.id, rsc->id, pcmk__node_name(node));
return;
}
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms));
if (!can_affect_state(&history)) {
pcmk__rsc_trace(rsc,
"Ignoring resource history entry %s for %s on %s "
"with irrelevant action '%s'",
history.id, rsc->id, pcmk__node_name(node),
history.task);
return;
}
if (unpack_action_result(&history) != pcmk_rc_ok) {
return; // Error already logged
}
history.expected_exit_status = pe__target_rc_from_xml(xml_op);
history.key = pcmk__xe_history_key(xml_op);
crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id));
pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
history.id, history.task, history.call_id,
pcmk__node_name(node),
pcmk_exec_status_str(history.execution_status),
crm_exit_str(history.exit_status));
if (node->details->unclean) {
pcmk__rsc_trace(rsc,
"%s is running on %s, which is unclean (further action "
"depends on value of stop's on-fail attribute)",
rsc->id, pcmk__node_name(node));
}
expired = check_operation_expiry(&history);
old_rc = history.exit_status;
remap_operation(&history, on_fail, expired);
if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) {
goto done;
}
if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
mask_probe_failure(&history, old_rc, *last_failure, on_fail);
goto done;
}
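    /* For anonymous clone instances, hard- and fatal-error bans below apply
     * to the whole clone
     */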
if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
parent = uber_parent(rsc);
}
switch (history.execution_status) {
case PCMK_EXEC_PENDING:
process_pending_action(&history, *last_failure);
goto done;
case PCMK_EXEC_DONE:
update_resource_state(&history, history.exit_status, *last_failure,
on_fail);
goto done;
case PCMK_EXEC_NOT_INSTALLED:
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if (failure_strategy == pcmk_on_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
CRM_XS " status=%d rc=%d id=%s",
history.task, rsc->id, pcmk__node_name(node),
history.execution_status, history.exit_status,
history.id);
                /* This also gets the resource marked as pcmk_rsc_failed later,
                 * so that it is displayed as "FAILED"
                 */
*on_fail = pcmk_on_fail_ban;
}
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->cluster);
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pcmk__is_pacemaker_remote_node(node)
&& pcmk_is_set(node->details->remote_rsc->flags,
pcmk_rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
* fail-safe in case a bug or unusual circumstances do lead to
* that, ensure the remote connection is considered failed.
*/
pcmk__set_rsc_flags(node->details->remote_rsc,
pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
break; // Not done, do error handling
case PCMK_EXEC_ERROR:
case PCMK_EXEC_ERROR_HARD:
case PCMK_EXEC_ERROR_FATAL:
case PCMK_EXEC_TIMEOUT:
case PCMK_EXEC_NOT_SUPPORTED:
case PCMK_EXEC_INVALID:
break; // Not done, do error handling
default: // No other value should be possible at this point
break;
}
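    // If we get here, the action failed: determine and apply the configured response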
unpack_failure_handling(&history, &failure_strategy, &fail_role);
if ((failure_strategy == pcmk_on_fail_ignore)
|| ((failure_strategy == pcmk_on_fail_restart_container)
&& (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
char *last_change_s = last_change_str(xml_op);
crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded "
CRM_XS " %s",
history.task, services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), rsc->id,
pcmk__node_name(node), last_change_s, history.id);
free(last_change_s);
update_resource_state(&history, history.expected_exit_status,
*last_failure, on_fail);
crm_xml_add(xml_op, PCMK_XA_UNAME, node->details->uname);
pcmk__set_rsc_flags(rsc, pcmk_rsc_ignore_failure);
record_failed_op(&history);
if ((failure_strategy == pcmk_on_fail_restart_container)
&& cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) {
*on_fail = failure_strategy;
}
} else {
unpack_rsc_op_failure(&history, failure_strategy, fail_role,
last_failure, on_fail);
if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
uint8_t log_level = LOG_ERR;
if (history.exit_status == PCMK_OCF_NOT_INSTALLED) {
log_level = LOG_NOTICE;
}
do_crm_log(log_level,
"Preventing %s from restarting on %s because "
"of hard failure (%s%s%s) " CRM_XS " %s",
parent->id, pcmk__node_name(node),
services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, node, -PCMK_SCORE_INFINITY,
"hard-error", rsc->cluster);
} else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) {
pcmk__sched_err("Preventing %s from restarting anywhere because "
"of fatal failure (%s%s%s) " CRM_XS " %s",
parent->id,
services_ocf_exitcode_str(history.exit_status),
(pcmk__str_empty(history.exit_reason)? "" : ": "),
pcmk__s(history.exit_reason, ""), history.id);
resource_location(parent, NULL, -PCMK_SCORE_INFINITY,
"fatal-error", rsc->cluster);
}
}
done:
pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)",
rsc->id, pcmk__node_name(node), history.id,
pcmk_role_text(rsc->role),
pcmk_role_text(rsc->next_role));
}
static void
add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
pcmk_scheduler_t *scheduler)
{
const char *cluster_name = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_UNAME, node->details->uname);
pcmk__insert_dup(node->details->attrs, CRM_ATTR_ID, node->details->id);
if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) {
scheduler->dc_node = node;
node->details->is_dc = TRUE;
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_TRUE);
} else {
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_IS_DC, PCMK_VALUE_FALSE);
}
cluster_name = g_hash_table_lookup(scheduler->config_hash,
PCMK_OPT_CLUSTER_NAME);
if (cluster_name) {
pcmk__insert_dup(node->details->attrs, CRM_ATTR_CLUSTER_NAME,
cluster_name);
}
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data,
node->details->attrs, NULL, overwrite,
scheduler);
pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_data,
node->details->utilization, NULL,
FALSE, scheduler);
if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL,
pcmk__rsc_node_current) == NULL) {
const char *site_name = pcmk__node_attr(node, "site-name", NULL,
pcmk__rsc_node_current);
if (site_name) {
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_SITE_NAME, site_name);
} else if (cluster_name) {
/* Default to cluster-name if unset */
pcmk__insert_dup(node->details->attrs,
CRM_ATTR_SITE_NAME, cluster_name);
}
}
}
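/*!
 * \internal
 * \brief Extract operation history entries for a resource on a node
 *
 * \param[in]     node           Name of node that history entries are for
 * \param[in]     rsc            ID of resource that history entries are for
 * \param[in,out] rsc_entry      Resource's status XML containing the entries
 * \param[in]     active_filter  If TRUE, return only entries since the
 *                               resource's last start (or none if it was
 *                               stopped more recently)
 *
 * \return List of XML history entries (or NULL if none)
 */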
static GList *
extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
{
int counter = -1;
int stop_index = -1;
int start_index = -1;
xmlNode *rsc_op = NULL;
GList *gIter = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
/* extract operations */
op_list = NULL;
sorted_op_list = NULL;
for (rsc_op = pcmk__xe_first_child(rsc_entry, NULL, NULL, NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
if (pcmk__xe_is(rsc_op, PCMK__XE_LRM_RSC_OP)) {
crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc);
crm_xml_add(rsc_op, PCMK_XA_UNAME, node);
op_list = g_list_prepend(op_list, rsc_op);
}
}
if (op_list == NULL) {
/* if there are no operations, there is nothing to do */
return NULL;
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
/* create active recurring operations as optional */
if (active_filter == FALSE) {
return sorted_op_list;
}
op_list = NULL;
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
if (start_index < stop_index) {
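            // The most recent stop is newer than the most recent start: nothing is active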
crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry));
break;
} else if (counter < start_index) {
crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op));
continue;
}
op_list = g_list_append(op_list, rsc_op);
}
g_list_free(sorted_op_list);
return op_list;
}
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
pcmk_scheduler_t *scheduler)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
xmlNode *status = pcmk__xe_first_child(scheduler->input, PCMK_XE_STATUS,
NULL, NULL);
pcmk_node_t *this_node = NULL;
xmlNode *node_state = NULL;
CRM_CHECK(status != NULL, return NULL);
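    // Determine each node's status, then collect the requested operation history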
for (node_state = pcmk__xe_first_child(status, NULL, NULL, NULL);
node_state != NULL; node_state = pcmk__xe_next(node_state)) {
if (pcmk__xe_is(node_state, PCMK__XE_NODE_STATE)) {
const char *uname = crm_element_value(node_state, PCMK_XA_UNAME);
if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
continue;
}
this_node = pcmk_find_node(scheduler, uname);
if(this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pcmk__is_pacemaker_remote_node(this_node)) {
determine_remote_online_status(scheduler, this_node);
} else {
determine_online_status(node_state, this_node, scheduler);
}
if (this_node->details->online
|| pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
/* offline nodes run no resources...
                 * unless stonith is enabled, in which case we need to
* make sure rsc start events happen after the stonith
*/
xmlNode *lrm_rsc = NULL;
tmp = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL,
NULL);
tmp = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCES, NULL,
NULL);
for (lrm_rsc = pcmk__xe_first_child(tmp, NULL, NULL, NULL);
lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc)) {
if (pcmk__xe_is(lrm_rsc, PCMK__XE_LRM_RESOURCE)) {
const char *rsc_id = crm_element_value(lrm_rsc,
PCMK_XA_ID);
if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
continue;
}
intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
output = g_list_concat(output, intermediate);
}
}
}
}
}
return output;
}
