diff --git a/cts/scheduler/exp/bug-1822.exp b/cts/scheduler/exp/bug-1822.exp
index 1206c97002..9960c68daa 100644
--- a/cts/scheduler/exp/bug-1822.exp
+++ b/cts/scheduler/exp/bug-1822.exp
@@ -1,185 +1,185 @@
-
+
-
+
diff --git a/cts/scheduler/exp/clone-anon-failcount.exp b/cts/scheduler/exp/clone-anon-failcount.exp
index 05312c2f44..a48f69b88d 100644
--- a/cts/scheduler/exp/clone-anon-failcount.exp
+++ b/cts/scheduler/exp/clone-anon-failcount.exp
@@ -1,515 +1,515 @@
-
+
diff --git a/cts/scheduler/exp/promoted-failed-demote-2.exp b/cts/scheduler/exp/promoted-failed-demote-2.exp
index 02b92508e7..81ed8df0be 100644
--- a/cts/scheduler/exp/promoted-failed-demote-2.exp
+++ b/cts/scheduler/exp/promoted-failed-demote-2.exp
@@ -1,195 +1,195 @@
-
+
diff --git a/cts/scheduler/exp/promoted-failed-demote.exp b/cts/scheduler/exp/promoted-failed-demote.exp
index e4fc706ac0..69e6b39ab4 100644
--- a/cts/scheduler/exp/promoted-failed-demote.exp
+++ b/cts/scheduler/exp/promoted-failed-demote.exp
@@ -1,456 +1,456 @@
-
+
diff --git a/cts/scheduler/exp/shutdown-lock-expiration.exp b/cts/scheduler/exp/shutdown-lock-expiration.exp
index 465f12b864..9941333b90 100644
--- a/cts/scheduler/exp/shutdown-lock-expiration.exp
+++ b/cts/scheduler/exp/shutdown-lock-expiration.exp
@@ -1,68 +1,68 @@
-
+
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index 99c4c5090c..95e35298bb 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1472 +1,1473 @@
/*
* Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include <crm/lrmd.h>
#include <crm/lrmd_internal.h>
#include <crm/services.h>
#include <pacemaker-controld.h>
#define REMOTE_LRMD_RA "remote"
/* The max start timeout before cmd retry */
#define MAX_START_TIMEOUT_MS 10000
#define cmd_set_flags(cmd, flags_to_set) do { \
(cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Remote command", (cmd)->rsc_id, (cmd)->status, \
(flags_to_set), #flags_to_set); \
} while (0)
#define cmd_clear_flags(cmd, flags_to_clear) do { \
(cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Remote command", (cmd)->rsc_id, (cmd)->status, \
(flags_to_clear), #flags_to_clear); \
} while (0)
enum remote_cmd_status {
cmd_reported_success = (1 << 0),
cmd_cancel = (1 << 1),
};
typedef struct remote_ra_cmd_s {
/*! the local node the cmd is issued from */
char *owner;
/*! the remote node the cmd is executed on */
char *rsc_id;
/*! the action to execute */
char *action;
/*! some string the client wants us to give it back */
char *userdata;
/*! start delay in ms */
int start_delay;
/*! timer id used for start delay. */
int delay_id;
/*! timeout in ms for cmd */
int timeout;
int remaining_timeout;
/*! recurring interval in ms */
guint interval_ms;
/*! interval timer id */
int interval_id;
int monitor_timeout_id;
int takeover_timeout_id;
/*! action parameters */
lrmd_key_value_t *params;
pcmk__action_result_t result;
int call_id;
time_t start_time;
uint32_t status;
} remote_ra_cmd_t;
#define lrm_remote_set_flags(lrm_state, flags_to_set) do { \
lrm_state_t *lrm = (lrm_state); \
remote_ra_data_t *ra = lrm->remote_ra_data; \
ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
lrm->node_name, ra->status, \
(flags_to_set), #flags_to_set); \
} while (0)
#define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \
lrm_state_t *lrm = (lrm_state); \
remote_ra_data_t *ra = lrm->remote_ra_data; \
ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
lrm->node_name, ra->status, \
(flags_to_clear), #flags_to_clear); \
} while (0)
enum remote_status {
expect_takeover = (1 << 0),
takeover_complete = (1 << 1),
remote_active = (1 << 2),
/* Maintenance mode is difficult to determine from the controller's context,
* so we have it signalled back with the transition from the scheduler.
*/
remote_in_maint = (1 << 3),
/* Similar for whether we are controlling a guest node or remote node.
* Fortunately there is a meta-attribute in the transition already and
* as the situation doesn't change over time we can use the
* resource start for noting down the information for later use when
* the attributes aren't at hand.
*/
controlling_guest = (1 << 4),
};
typedef struct remote_ra_data_s {
crm_trigger_t *work;
remote_ra_cmd_t *cur_cmd;
GList *cmds;
GList *recurring_cmds;
uint32_t status;
} remote_ra_data_t;
static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
static GList *fail_all_monitor_cmds(GList * list);
static void
free_cmd(gpointer user_data)
{
remote_ra_cmd_t *cmd = user_data;
if (!cmd) {
return;
}
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
}
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
}
if (cmd->takeover_timeout_id) {
g_source_remove(cmd->takeover_timeout_id);
}
free(cmd->owner);
free(cmd->rsc_id);
free(cmd->action);
free(cmd->userdata);
pcmk__reset_result(&(cmd->result));
lrmd_key_value_freeall(cmd->params);
free(cmd);
}
static int
generate_callid(void)
{
static int remote_ra_callid = 0;
remote_ra_callid++;
if (remote_ra_callid <= 0) {
remote_ra_callid = 1;
}
return remote_ra_callid;
}
static gboolean
recurring_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->interval_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
static gboolean
start_delay_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->delay_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
static bool
should_purge_attributes(crm_node_t *node)
{
bool purge = true;
crm_node_t *conn_node = NULL;
lrm_state_t *connection_rsc = NULL;
if (!node->conn_host) {
return purge;
}
/* Get the node that was hosting the remote connection resource from the
* peer cache. That's the one we really care about here.
*/
conn_node = crm_get_peer(0, node->conn_host);
if (conn_node == NULL) {
return purge;
}
/* Check the uptime of connection_rsc. If it hasn't been running long
* enough, set purge=true. "Long enough" means it started running earlier
* than the timestamp when we noticed it went away in the first place.
*/
connection_rsc = lrm_state_find(node->uname);
if (connection_rsc != NULL) {
lrmd_t *lrm = connection_rsc->conn;
time_t uptime = lrmd__uptime(lrm);
time_t now = time(NULL);
/* Add 20s of fuzziness to give corosync a while to notice the remote
* host is gone. On various error conditions (failure to get uptime,
* peer_lost isn't set) we default to purging.
*/
if (uptime > 0 &&
conn_node->peer_lost > 0 &&
uptime + 20 >= now - conn_node->peer_lost) {
purge = false;
}
}
return purge;
}
static enum controld_section_e
section_to_delete(bool purge)
{
if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
if (purge) {
return controld_section_all_unlocked;
} else {
return controld_section_lrm_unlocked;
}
} else {
if (purge) {
return controld_section_all;
} else {
return controld_section_lrm;
}
}
}
static void
purge_remote_node_attrs(int call_opt, crm_node_t *node)
{
bool purge = should_purge_attributes(node);
enum controld_section_e section = section_to_delete(purge);
/* Purge node from attrd's memory */
if (purge) {
update_attrd_remote_node_removed(node->uname, NULL);
}
controld_delete_node_state(node->uname, section, call_opt);
}
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node joining
*
* \param[in] node_name Name of newly integrated pacemaker_remote node
*/
static void
remote_node_up(const char *node_name)
{
int call_opt;
xmlNode *update, *state;
crm_node_t *node;
lrm_state_t *connection_rsc = NULL;
CRM_CHECK(node_name != NULL, return);
crm_info("Announcing Pacemaker Remote node %s", node_name);
call_opt = crmd_cib_smart_opt();
/* Delete node's probe_complete attribute. This serves two purposes:
*
* - @COMPAT DCs < 1.1.14 in a rolling upgrade might use it
* - deleting it (or any attribute for that matter) here ensures the
* attribute manager learns the node is remote
*/
update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
/* Ensure node is in the remote peer cache with member status */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
purge_remote_node_attrs(call_opt, node);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
/* Apply any start state that we were given from the environment on the
* remote node.
*/
connection_rsc = lrm_state_find(node->uname);
if (connection_rsc != NULL) {
lrmd_t *lrm = connection_rsc->conn;
const char *start_state = lrmd__node_start_state(lrm);
if (start_state) {
set_join_state(start_state, node->uname, node->uuid, true);
}
}
/* pacemaker_remote nodes don't participate in the membership layer,
* so cluster nodes don't automatically get notified when they come and go.
* We send a cluster message to the DC, and update the CIB node state entry,
* so the DC will get it sooner (via message) or later (via CIB refresh),
* and any other interested parties can query the CIB.
*/
broadcast_remote_state_message(node_name, true);
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
state = create_node_state_update(node, node_update_cluster, update,
__func__);
/* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever
* needs to be fenced, this flag will allow various actions to determine
* whether the fencing has happened yet.
*/
crm_xml_add(state, XML_NODE_IS_FENCED, "0");
/* TODO: If the remote connection drops, and this (async) CIB update either
* failed or has not yet completed, later actions could mistakenly think the
* node has already been fenced (if the XML_NODE_IS_FENCED attribute was
* previously set, because it won't have been cleared). This could prevent
* actual fencing or allow recurring monitor failures to be cleared too
* soon. Ideally, we wouldn't rely on the CIB for the fenced status.
*/
controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL, NULL);
free_xml(update);
}
enum down_opts {
DOWN_KEEP_LRM,
DOWN_ERASE_LRM
};
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node leaving
*
* \param[in] node_name Name of lost node
* \param[in] opts Whether to keep or erase LRM history
*/
static void
remote_node_down(const char *node_name, const enum down_opts opts)
{
xmlNode *update;
int call_opt = crmd_cib_smart_opt();
crm_node_t *node;
/* Purge node from attrd's memory */
update_attrd_remote_node_removed(node_name, NULL);
/* Normally, only node attributes should be erased, and the resource history
* should be kept until the node comes back up. However, after a successful
* fence, we want to clear the history as well, so we don't think resources
* are still running on the node.
*/
if (opts == DOWN_ERASE_LRM) {
controld_delete_node_state(node_name, controld_section_all, call_opt);
} else {
controld_delete_node_state(node_name, controld_section_attrs, call_opt);
}
/* Ensure node is in the remote peer cache with lost state */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
/* Notify DC */
broadcast_remote_state_message(node_name, false);
/* Update CIB node state */
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
create_node_state_update(node, node_update_cluster, update, __func__);
controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL, NULL);
free_xml(update);
}
/*!
* \internal
* \brief Handle effects of a remote RA command on node state
*
* \param[in] cmd Completed remote RA command
*/
static void
check_remote_node_state(const remote_ra_cmd_t *cmd)
{
/* Only successful actions can change node state */
if (!pcmk__result_ok(&(cmd->result))) {
return;
}
if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) {
remote_node_up(cmd->rsc_id);
} else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_casei)) {
/* After a successful migration, we don't need to do remote_node_up()
* because the DC already knows the node is up, and we don't want to
* clear LRM history etc. We do need to add the remote node to this
* host's remote peer cache, because (unless it happens to be DC)
* it hasn't been tracking the remote node, and other code relies on
* the cache to distinguish remote nodes from unseen cluster nodes.
*/
crm_node_t *node = crm_remote_peer_get(cmd->rsc_id);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
} else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
if (ra_data) {
if (!pcmk_is_set(ra_data->status, takeover_complete)) {
/* Stop means down if we didn't successfully migrate elsewhere */
remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
} else if (AM_I_DC == FALSE) {
/* Only the connection host and DC track node state,
* so if the connection migrated elsewhere and we aren't DC,
* un-cache the node, so we don't have stale info
*/
crm_remote_peer_cache_remove(cmd->rsc_id);
}
}
}
/* We don't do anything for successful monitors, which is correct for
* routine recurring monitors, and for monitors on nodes where the
* connection isn't supposed to be (the cluster will stop the connection in
* that case). However, if the initial probe finds the connection already
* active on the node where we want it, we probably should do
* remote_node_up(). Unfortunately, we can't distinguish that case here.
* Given that connections have to be initiated by the cluster, the chance of
* that should be close to zero.
*/
}
static void
report_remote_ra_result(remote_ra_cmd_t * cmd)
{
lrmd_event_data_t op = { 0, };
check_remote_node_state(cmd);
op.type = lrmd_event_exec_complete;
op.rsc_id = cmd->rsc_id;
op.op_type = cmd->action;
op.user_data = cmd->userdata;
op.timeout = cmd->timeout;
op.interval_ms = cmd->interval_ms;
op.t_run = (unsigned int) cmd->start_time;
op.t_rcchange = (unsigned int) cmd->start_time;
lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status,
cmd->result.exit_reason);
if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) {
op.t_rcchange = (unsigned int) time(NULL);
/* This edge case will likely never ever occur, but if it does the
* result is that a failure will not be processed correctly. This is only
* remotely possible because we are able to detect a connection resource's tcp
* connection has failed at any moment after start has completed. The actual
* recurring operation is just a connectivity ping.
*
* basically, we are not guaranteed that the first successful monitor op and
* a subsequent failed monitor op will not occur in the same timestamp. We have to
* make it look like the operations occurred at separate times though. */
if (op.t_rcchange == op.t_run) {
op.t_rcchange++;
}
}
if (cmd->params) {
lrmd_key_value_t *tmp;
op.params = pcmk__strkey_table(free, free);
for (tmp = cmd->params; tmp; tmp = tmp->next) {
g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value));
}
}
op.call_id = cmd->call_id;
op.remote_nodename = cmd->owner;
lrm_op_callback(&op);
if (op.params) {
g_hash_table_destroy(op.params);
}
lrmd__reset_result(&op);
}
static void
update_remaining_timeout(remote_ra_cmd_t * cmd)
{
cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
}
static gboolean
retry_start_cmd_cb(gpointer data)
{
lrm_state_t *lrm_state = data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd = NULL;
int rc = ETIME;
if (!ra_data || !ra_data->cur_cmd) {
return FALSE;
}
cmd = ra_data->cur_cmd;
if (!pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
return FALSE;
}
update_remaining_timeout(cmd);
if (cmd->remaining_timeout > 0) {
rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
} else {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_TIMEOUT,
"Not enough time remains to retry remote connection");
}
if (rc != pcmk_rc_ok) {
report_remote_ra_result(cmd);
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
ra_data->cur_cmd = NULL;
free_cmd(cmd);
} else {
/* wait for connection event */
}
return FALSE;
}
static gboolean
connection_takeover_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
crm_info("takeover event timed out for node %s", cmd->rsc_id);
cmd->takeover_timeout_id = 0;
lrm_state = lrm_state_find(cmd->rsc_id);
handle_remote_ra_stop(lrm_state, cmd);
free_cmd(cmd);
return FALSE;
}
static gboolean
monitor_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
lrm_state = lrm_state_find(cmd->rsc_id);
crm_info("Timed out waiting for remote poke response from %s%s",
cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
cmd->monitor_timeout_id = 0;
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT,
"Remote executor did not respond");
if (lrm_state && lrm_state->remote_ra_data) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (ra_data->cur_cmd == cmd) {
ra_data->cur_cmd = NULL;
}
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
}
report_remote_ra_result(cmd);
free_cmd(cmd);
if(lrm_state) {
lrm_state_disconnect(lrm_state);
}
return FALSE;
}
static void
synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
{
lrmd_event_data_t op = { 0, };
if (lrm_state == NULL) {
/* if lrm_state not given assume local */
lrm_state = lrm_state_find(controld_globals.our_nodename);
}
CRM_ASSERT(lrm_state != NULL);
op.type = lrmd_event_exec_complete;
op.rsc_id = rsc_id;
op.op_type = op_type;
op.t_run = (unsigned int) time(NULL);
op.t_rcchange = op.t_run;
op.call_id = generate_callid();
lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
process_lrm_event(lrm_state, &op, NULL, NULL);
}
void
remote_lrm_op_callback(lrmd_event_data_t * op)
{
gboolean cmd_handled = FALSE;
lrm_state_t *lrm_state = NULL;
remote_ra_data_t *ra_data = NULL;
remote_ra_cmd_t *cmd = NULL;
crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
"(%d) status=%s (%d)",
(op->op_type? op->op_type : ""), (op->op_type? " " : ""),
lrmd_event_type2str(op->type), op->remote_nodename,
services_ocf_exitcode_str(op->rc), op->rc,
pcmk_exec_status_str(op->op_status), op->op_status);
lrm_state = lrm_state_find(op->remote_nodename);
if (!lrm_state || !lrm_state->remote_ra_data) {
crm_debug("No state information found for remote connection event");
return;
}
ra_data = lrm_state->remote_ra_data;
if (op->type == lrmd_event_new_client) {
// Another client has connected to the remote daemon
if (pcmk_is_set(ra_data->status, expect_takeover)) {
// Great, we knew this was coming
lrm_remote_clear_flags(lrm_state, expect_takeover);
lrm_remote_set_flags(lrm_state, takeover_complete);
} else {
crm_err("Disconnecting from Pacemaker Remote node %s due to "
"unexpected client takeover", op->remote_nodename);
/* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */
/* Do not free lrm_state->conn yet. */
/* It'll be freed in the following stop action. */
lrm_state_disconnect_only(lrm_state);
}
return;
}
/* filter all EXEC events up */
if (op->type == lrmd_event_exec_complete) {
if (pcmk_is_set(ra_data->status, takeover_complete)) {
crm_debug("ignoring event, this connection is taken over by another node");
} else {
lrm_op_callback(op);
}
return;
}
if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
if (!pcmk_is_set(ra_data->status, remote_active)) {
crm_debug("Disconnection from Pacemaker Remote node %s complete",
lrm_state->node_name);
} else if (!remote_ra_is_in_maintenance(lrm_state)) {
crm_err("Lost connection to Pacemaker Remote node %s",
lrm_state->node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
} else {
crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
lrm_state->node_name);
/* Do roughly what a 'stop' on the remote-resource would do */
handle_remote_ra_stop(lrm_state, NULL);
remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
/* now fake the reply of a successful 'stop' */
synthesize_lrmd_success(NULL, lrm_state->node_name,
PCMK_ACTION_STOP);
}
return;
}
if (!ra_data->cur_cmd) {
crm_debug("no event to match");
return;
}
cmd = ra_data->cur_cmd;
/* Start actions and migrate from actions complete after connection
* comes back to us. */
if ((op->type == lrmd_event_connect)
&& pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
if (op->connection_rc < 0) {
update_remaining_timeout(cmd);
if ((op->connection_rc == -ENOKEY)
|| (op->connection_rc == -EKEYREJECTED)) {
// Hard error, don't retry
pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM,
PCMK_EXEC_ERROR,
pcmk_strerror(op->connection_rc));
} else if (cmd->remaining_timeout > 3000) {
crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
return;
} else {
crm_trace("can't reschedule start, remaining timeout too small %d",
cmd->remaining_timeout);
pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_TIMEOUT,
"%s without enough time to retry",
pcmk_strerror(op->connection_rc));
}
} else {
lrm_state_reset_tables(lrm_state, TRUE);
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
lrm_remote_set_flags(lrm_state, remote_active);
}
crm_debug("Remote connection event matched %s action", cmd->action);
report_remote_ra_result(cmd);
cmd_handled = TRUE;
} else if ((op->type == lrmd_event_poke)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
cmd->monitor_timeout_id = 0;
}
/* Only report success the first time, after that only worry about failures.
* For this function, if we get the poke pack, it is always a success. Pokes
* only fail if the send fails, or the response times out. */
if (!pcmk_is_set(cmd->status, cmd_reported_success)) {
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
cmd_set_flags(cmd, cmd_reported_success);
}
crm_debug("Remote poke event matched %s action", cmd->action);
/* success, keep rescheduling if interval is present. */
if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) {
ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
cmd->interval_id = g_timeout_add(cmd->interval_ms,
recurring_helper, cmd);
cmd = NULL; /* prevent free */
}
cmd_handled = TRUE;
} else if ((op->type == lrmd_event_disconnect)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
if (pcmk_is_set(ra_data->status, remote_active) &&
!pcmk_is_set(cmd->status, cmd_cancel)) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR,
"Remote connection unexpectedly dropped "
"during monitor");
report_remote_ra_result(cmd);
crm_err("Remote connection to %s unexpectedly dropped during monitor",
lrm_state->node_name);
}
cmd_handled = TRUE;
} else if ((op->type == lrmd_event_new_client)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_STOP,
pcmk__str_casei)) {
handle_remote_ra_stop(lrm_state, cmd);
cmd_handled = TRUE;
} else {
crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
}
if (cmd_handled) {
ra_data->cur_cmd = NULL;
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
free_cmd(cmd);
}
}
static void
handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
{
remote_ra_data_t *ra_data = NULL;
CRM_ASSERT(lrm_state);
ra_data = lrm_state->remote_ra_data;
if (!pcmk_is_set(ra_data->status, takeover_complete)) {
/* delete pending ops when ever the remote connection is intentionally stopped */
g_hash_table_remove_all(lrm_state->active_ops);
} else {
/* we no longer hold the history if this connection has been migrated,
* however, we keep metadata cache for future use */
lrm_state_reset_tables(lrm_state, FALSE);
}
lrm_remote_clear_flags(lrm_state, remote_active);
lrm_state_disconnect(lrm_state);
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
ra_data->cmds = NULL;
ra_data->recurring_cmds = NULL;
ra_data->cur_cmd = NULL;
if (cmd) {
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
}
}
// \return Standard Pacemaker return code
static int
handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
{
const char *server = NULL;
lrmd_key_value_t *tmp = NULL;
int port = 0;
int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
int rc = pcmk_rc_ok;
for (tmp = cmd->params; tmp; tmp = tmp->next) {
if (pcmk__strcase_any_of(tmp->key, XML_RSC_ATTR_REMOTE_RA_ADDR,
XML_RSC_ATTR_REMOTE_RA_SERVER, NULL)) {
server = tmp->value;
} else if (pcmk__str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_PORT, pcmk__str_casei)) {
port = atoi(tmp->value);
} else if (pcmk__str_eq(tmp->key, CRM_META "_" XML_RSC_ATTR_CONTAINER, pcmk__str_casei)) {
lrm_remote_set_flags(lrm_state, controlling_guest);
}
}
rc = controld_connect_remote_executor(lrm_state, server, port,
timeout_used);
if (rc != pcmk_rc_ok) {
pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR,
"Could not connect to Pacemaker Remote node %s: %s",
lrm_state->node_name, pcmk_rc_str(rc));
}
return rc;
}
static gboolean
handle_remote_ra_exec(gpointer user_data)
{
int rc = 0;
lrm_state_t *lrm_state = user_data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd;
GList *first = NULL;
if (ra_data->cur_cmd) {
/* still waiting on previous cmd */
return TRUE;
}
while (ra_data->cmds) {
first = ra_data->cmds;
cmd = first->data;
if (cmd->delay_id) {
/* still waiting for start delay timer to trip */
return TRUE;
}
ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
g_list_free_1(first);
if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete);
if (handle_remote_ra_start(lrm_state, cmd,
cmd->timeout) == pcmk_rc_ok) {
/* take care of this later when we get async connection result */
crm_debug("Initiated async remote connection, %s action will complete after connect event",
cmd->action);
ra_data->cur_cmd = cmd;
return TRUE;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) {
if (lrm_state_is_connected(lrm_state) == TRUE) {
rc = lrm_state_poke_connection(lrm_state);
if (rc < 0) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR, pcmk_strerror(rc));
}
} else {
rc = -1;
pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING,
PCMK_EXEC_DONE, "Remote connection inactive");
}
if (rc == 0) {
crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
cmd->rsc_id);
ra_data->cur_cmd = cmd;
cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
return TRUE;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) {
if (pcmk_is_set(ra_data->status, expect_takeover)) {
/* briefly wait on stop for the takeover event to occur. If the
* takeover event does not occur during the wait period, that's fine.
* It just means that the remote-node's lrm_status section is going to get
* cleared which will require all the resources running in the remote-node
* to be explicitly re-detected via probe actions. If the takeover does occur
* successfully, then we can leave the status section intact. */
cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
ra_data->cur_cmd = cmd;
return TRUE;
}
handle_remote_ra_stop(lrm_state, cmd);
} else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) {
lrm_remote_clear_flags(lrm_state, takeover_complete);
lrm_remote_set_flags(lrm_state, expect_takeover);
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
} else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD,
PCMK_ACTION_RELOAD_AGENT, NULL)) {
/* Currently the only reloadable parameter is reconnect_interval,
* which is only used by the scheduler via the CIB, so reloads are a
* no-op.
*
* @COMPAT DC <2.1.0: We only need to check for "reload" in case
* we're in a rolling upgrade with a DC scheduling "reload" instead
* of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway,
* so this would work for that purpose as well.
*/
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
}
free_cmd(cmd);
}
return TRUE;
}
static void
remote_ra_data_init(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = NULL;
if (lrm_state->remote_ra_data) {
return;
}
ra_data = calloc(1, sizeof(remote_ra_data_t));
ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
lrm_state->remote_ra_data = ra_data;
}
void
remote_ra_cleanup(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (!ra_data) {
return;
}
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
mainloop_destroy_trigger(ra_data->work);
free(ra_data);
lrm_state->remote_ra_data = NULL;
}
gboolean
is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
{
if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
return TRUE;
}
if ((id != NULL) && (lrm_state_find(id) != NULL)
&& !pcmk__str_eq(id, controld_globals.our_nodename, pcmk__str_casei)) {
return TRUE;
}
return FALSE;
}
lrmd_rsc_info_t *
remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
{
lrmd_rsc_info_t *info = NULL;
if ((lrm_state_find(rsc_id))) {
info = calloc(1, sizeof(lrmd_rsc_info_t));
info->id = strdup(rsc_id);
info->type = strdup(REMOTE_LRMD_RA);
info->standard = strdup(PCMK_RESOURCE_CLASS_OCF);
info->provider = strdup("pacemaker");
}
return info;
}
static gboolean
is_remote_ra_supported_action(const char *action)
{
return pcmk__str_any_of(action,
PCMK_ACTION_START,
PCMK_ACTION_STOP,
PCMK_ACTION_MONITOR,
PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM,
PCMK_ACTION_RELOAD_AGENT,
PCMK_ACTION_RELOAD,
NULL);
}
static GList *
fail_all_monitor_cmds(GList * list)
{
GList *rm_list = NULL;
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms > 0)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
rm_list = g_list_append(rm_list, cmd);
}
}
for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR, "Lost connection to remote executor");
crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
report_remote_ra_result(cmd);
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
/* frees only the list data, not the cmds */
g_list_free(rm_list);
return list;
}
static GList *
remove_cmd(GList * list, const char *action, guint interval_ms)
{
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, action, pcmk__str_casei)) {
break;
}
cmd = NULL;
}
if (cmd) {
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
return list;
}
int
remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, guint interval_ms)
{
lrm_state_t *connection_rsc = NULL;
remote_ra_data_t *ra_data = NULL;
connection_rsc = lrm_state_find(rsc_id);
if (!connection_rsc || !connection_rsc->remote_ra_data) {
return -EINVAL;
}
ra_data = connection_rsc->remote_ra_data;
ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
interval_ms);
if (ra_data->cur_cmd &&
(ra_data->cur_cmd->interval_ms == interval_ms) &&
(pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) {
cmd_set_flags(ra_data->cur_cmd, cmd_cancel);
}
return 0;
}
static remote_ra_cmd_t *
handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
const char *userdata)
{
GList *gIter = NULL;
remote_ra_cmd_t *cmd = NULL;
/* there are 3 places a potential duplicate monitor operation
* could exist.
* 1. recurring_cmds list. where the op is waiting for its next interval
* 2. cmds list, where the op is queued to get executed immediately
* 3. cur_cmd, which means the monitor op is in flight right now.
*/
if (interval_ms == 0) {
return NULL;
}
if (ra_data->cur_cmd &&
!pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) &&
(ra_data->cur_cmd->interval_ms == interval_ms)
&& pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
cmd = ra_data->cur_cmd;
goto handle_dup;
}
for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
goto handle_dup;
}
}
for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
goto handle_dup;
}
}
return NULL;
handle_dup:
crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms);
/* update the userdata */
if (userdata) {
free(cmd->userdata);
cmd->userdata = strdup(userdata);
}
/* if we've already reported success, generate a new call id */
if (pcmk_is_set(cmd->status, cmd_reported_success)) {
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
cmd_clear_flags(cmd, cmd_reported_success);
}
/* if we have an interval_id set, that means we are in the process of
* waiting for this cmd's next interval. instead of waiting, cancel
* the timer and execute the action immediately */
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
cmd->interval_id = 0;
recurring_helper(cmd);
}
return cmd;
}
/*!
* \internal
* \brief Execute an action using the (internal) ocf:pacemaker:remote agent
*
* \param[in] lrm_state Executor state object for remote connection
* \param[in] rsc_id Connection resource ID
* \param[in] action Action to execute
* \param[in] userdata String to copy and pass to execution callback
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in] timeout_ms Action timeout (in milliseconds)
* \param[in] start_delay_ms Delay (in milliseconds) before executing action
* \param[in,out] params Connection resource parameters
* \param[out] call_id Where to store call ID on success
*
* \return Standard Pacemaker return code
* \note This takes ownership of \p params, which should not be used or freed
* after calling this function.
*/
int
controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id,
const char *action, const char *userdata,
guint interval_ms, int timeout_ms,
int start_delay_ms, lrmd_key_value_t *params,
int *call_id)
{
lrm_state_t *connection_rsc = NULL;
remote_ra_cmd_t *cmd = NULL;
remote_ra_data_t *ra_data = NULL;
*call_id = 0;
CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL)
&& (userdata != NULL) && (call_id != NULL),
lrmd_key_value_freeall(params); return EINVAL);
if (!is_remote_ra_supported_action(action)) {
lrmd_key_value_freeall(params);
return EOPNOTSUPP;
}
connection_rsc = lrm_state_find(rsc_id);
if (connection_rsc == NULL) {
lrmd_key_value_freeall(params);
return ENOTCONN;
}
remote_ra_data_init(connection_rsc);
ra_data = connection_rsc->remote_ra_data;
cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
if (cmd) {
*call_id = cmd->call_id;
lrmd_key_value_freeall(params);
return pcmk_rc_ok;
}
cmd = calloc(1, sizeof(remote_ra_cmd_t));
if (cmd == NULL) {
lrmd_key_value_freeall(params);
return ENOMEM;
}
cmd->owner = strdup(lrm_state->node_name);
cmd->rsc_id = strdup(rsc_id);
cmd->action = strdup(action);
cmd->userdata = strdup(userdata);
if ((cmd->owner == NULL) || (cmd->rsc_id == NULL) || (cmd->action == NULL)
|| (cmd->userdata == NULL)) {
free_cmd(cmd);
lrmd_key_value_freeall(params);
return ENOMEM;
}
cmd->interval_ms = interval_ms;
cmd->timeout = timeout_ms;
cmd->start_delay = start_delay_ms;
cmd->params = params;
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
if (cmd->start_delay) {
cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
}
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
*call_id = cmd->call_id;
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Immediately fail all monitors of a remote node, if proxied here
*
* \param[in] node_name Name of pacemaker_remote node
*/
void
remote_ra_fail(const char *node_name)
{
lrm_state_t *lrm_state = lrm_state_find(node_name);
if (lrm_state && lrm_state_is_connected(lrm_state)) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
crm_info("Failing monitors on Pacemaker Remote node %s", node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
}
}
/* A guest node fencing implied by host fencing looks like:
 *
 *  <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
 *                on_node="lxc1" on_node_uuid="lxc1">
 *     <attributes CRM_meta_on_node="lxc1" CRM_meta_stonith_action="off" .../>
 *     <downed>
 *       <node id="lxc1"/>
 *     </downed>
 *  </pseudo_event>
 */
#define XPATH_PSEUDO_FENCE "/" XML_GRAPH_TAG_PSEUDO_EVENT \
"[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \
"/" XML_CIB_TAG_NODE
/*!
* \internal
* \brief Check a pseudo-action for Pacemaker Remote node side effects
*
* \param[in,out] xml XML of pseudo-action to check
*/
void
remote_ra_process_pseudo(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
if (numXpathResults(search) == 1) {
xmlNode *result = getXpathResult(search, 0);
/* Normally, we handle the necessary side effects of a guest node stop
* action when reporting the remote agent's result. However, if the stop
* is implied due to fencing, it will be a fencing pseudo-event, and
* there won't be a result to report. Handle that case here.
*
* This will result in a duplicate call to remote_node_down() if the
* guest stop was real instead of implied, but that shouldn't hurt.
*
* There is still one corner case that isn't handled: if a guest node
* isn't running any resources when its host is fenced, it will appear
* to be cleanly stopped, so there will be no pseudo-fence, and our
* peer cache state will be incorrect unless and until the guest is
* recovered.
*/
if (result) {
const char *remote = ID(result);
if (remote) {
remote_node_down(remote, DOWN_ERASE_LRM);
}
}
}
freeXpathObject(search);
}
static void
remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
{
xmlNode *update, *state;
int call_opt;
crm_node_t *node;
call_opt = crmd_cib_smart_opt();
node = crm_remote_peer_get(lrm_state->node_name);
CRM_CHECK(node != NULL, return);
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
state = create_node_state_update(node, node_update_none, update,
__func__);
crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0");
if (controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL,
NULL) == pcmk_rc_ok) {
/* TODO: still not 100% sure that async update will succeed ... */
if (maintenance) {
lrm_remote_set_flags(lrm_state, remote_in_maint);
} else {
lrm_remote_clear_flags(lrm_state, remote_in_maint);
}
}
free_xml(update);
}
#define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
"[@" XML_LRM_ATTR_TASK "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \
XML_GRAPH_TAG_MAINTENANCE
/*!
* \internal
* \brief Check a pseudo-action holding updates for maintenance state
*
* \param[in,out] xml XML of pseudo-action to check
*/
void
remote_ra_process_maintenance_nodes(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
if (numXpathResults(search) == 1) {
xmlNode *node;
int cnt = 0, cnt_remote = 0;
- for (node =
- first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE);
- node != NULL; node = pcmk__xml_next(node)) {
+ for (node = first_named_child(getXpathResult(search, 0),
+ XML_CIB_TAG_NODE);
+ node != NULL; node = crm_next_same_xml(node)) {
+
lrm_state_t *lrm_state = lrm_state_find(ID(node));
cnt++;
if (lrm_state && lrm_state->remote_ra_data &&
pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) {
int is_maint;
cnt_remote++;
pcmk__scan_min_int(crm_element_value(node, XML_NODE_IS_MAINTENANCE),
&is_maint, 0);
remote_ra_maintenance(lrm_state, is_maint);
}
}
crm_trace("Action holds %d nodes (%d remotes found) "
"adjusting maintenance-mode", cnt, cnt_remote);
}
freeXpathObject(search);
}
gboolean
remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return pcmk_is_set(ra_data->status, remote_in_maint);
}
gboolean
remote_ra_controlling_guest(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return pcmk_is_set(ra_data->status, controlling_guest);
}
diff --git a/doc/sphinx/Pacemaker_Explained/index.rst b/doc/sphinx/Pacemaker_Explained/index.rst
index de2ddd9434..36504b66e5 100644
--- a/doc/sphinx/Pacemaker_Explained/index.rst
+++ b/doc/sphinx/Pacemaker_Explained/index.rst
@@ -1,41 +1,42 @@
Pacemaker Explained
===================
*Configuring Pacemaker Clusters*
Abstract
--------
This document definitively explains Pacemaker's features and capabilities,
particularly the XML syntax used in Pacemaker's Cluster Information Base (CIB).
Table of Contents
-----------------
.. toctree::
:maxdepth: 3
:numbered:
intro
options
nodes
resources
+ operations
constraints
fencing
alerts
rules
advanced-options
advanced-resources
reusing-configuration
utilization
acls
status
multi-site-clusters
ap-samples
Index
-----
* :ref:`genindex`
* :ref:`search`
diff --git a/doc/sphinx/Pacemaker_Explained/operations.rst b/doc/sphinx/Pacemaker_Explained/operations.rst
new file mode 100644
index 0000000000..ddc43cde34
--- /dev/null
+++ b/doc/sphinx/Pacemaker_Explained/operations.rst
@@ -0,0 +1,385 @@
+.. index::
+ single: resource; action
+ single: resource; operation
+
+.. _operation:
+
+Resource Operations
+-------------------
+
+*Operations* are actions the cluster can perform on a resource by calling the
+resource agent. Resource agents must support certain common operations such as
+start, stop, and monitor, and may implement any others.
+
+Operations may be explicitly configured for two purposes: to override defaults
+for options (such as timeout) that the cluster will use whenever it initiates
+the operation, and to run an operation on a recurring basis (for example, to
+monitor the resource for failure).
+
+.. topic:: An OCF resource with a non-default start timeout
+
+ .. code-block:: xml
+
+      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+        <operations>
+           <op id="Public-IP-start" name="start" timeout="60s"/>
+        </operations>
+        <instance_attributes id="params-public-ip">
+           <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+        </instance_attributes>
+      </primitive>
+
+Pacemaker identifies operations by a combination of name and interval, so this
+combination must be unique for each resource. That is, you should not configure
+two operations for the same resource with the same name and interval.
+
+.. _operation_properties:
+
+Operation Properties
+####################
+
+Operation properties may be specified directly in the ``op`` element as
+XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements.
+XML attributes take precedence over ``nvpair`` elements if both are specified.
+
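+For instance, the following sketch (with hypothetical IDs) sets a start
+``timeout`` once as an XML attribute and once as an ``nvpair``; if both were
+present, the XML attribute would win:
+
+.. code-block:: xml
+
+   <op id="my-rsc-start" name="start" timeout="60s"/>
+
+   <op id="my-rsc-start-alt" name="start">
+     <meta_attributes id="my-rsc-start-meta">
+       <nvpair id="my-rsc-start-timeout" name="timeout" value="60s"/>
+     </meta_attributes>
+   </op>
+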
+.. table:: **Properties of an Operation**
+ :class: longtable
+ :widths: 1 2 3
+
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | Field | Default | Description |
+ +================+===================================+=====================================================+
+ | id | | .. index:: |
+ | | | single: id; action property |
+ | | | single: action; property, id |
+ | | | |
+ | | | A unique name for the operation. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | name | | .. index:: |
+ | | | single: name; action property |
+ | | | single: action; property, name |
+ | | | |
+ | | | The action to perform. This can be any action |
+ | | | supported by the agent; common values include |
+ | | | ``monitor``, ``start``, and ``stop``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | interval | 0 | .. index:: |
+ | | | single: interval; action property |
+ | | | single: action; property, interval |
+ | | | |
+ | | | How frequently (in seconds) to perform the |
+ | | | operation. A value of 0 means "when needed". |
+ | | | A positive value defines a *recurring action*, |
+ | | | which is typically used with |
+ | | | :ref:`monitor `. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | timeout | | .. index:: |
+ | | | single: timeout; action property |
+ | | | single: action; property, timeout |
+ | | | |
+ | | | How long to wait before declaring the action |
+ | | | has failed |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | on-fail | Varies by action: | .. index:: |
+ | | | single: on-fail; action property |
+ | | * ``stop``: ``fence`` if | single: action; property, on-fail |
+ | | ``stonith-enabled`` is true | |
+ | | or ``block`` otherwise | The action to take if this action ever fails. |
+ | | * ``demote``: ``on-fail`` of the | Allowed values: |
+ | | ``monitor`` action with | |
+ | | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. |
+ | | if present, enabled, and | * ``block:`` Don't perform any further operations |
+ | | configured to a value other | on the resource. |
+ | | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start |
+ | | otherwise | it elsewhere. |
+ | | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a |
+ | | | full restart. This is valid only for ``promote`` |
+ | | | actions, and for ``monitor`` actions with both |
+ | | | a nonzero ``interval`` and ``role`` set to |
+ | | | ``Promoted``; for any other action, a |
+ | | | configuration error will be logged, and the |
+ | | | default behavior will be used. *(since 2.0.5)* |
+ | | | * ``restart:`` Stop the resource and start it |
+ | | | again (possibly on a different node). |
+ | | | * ``fence:`` STONITH the node on which the |
+ | | | resource failed. |
+ | | | * ``standby:`` Move *all* resources away from the |
+ | | | node on which the resource failed. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | enabled | TRUE | .. index:: |
+ | | | single: enabled; action property |
+ | | | single: action; property, enabled |
+ | | | |
+ | | | If ``false``, ignore this operation definition. |
+ | | | This is typically used to pause a particular |
+ | | | recurring ``monitor`` operation; for instance, it |
+ | | | can complement the respective resource being |
+ | | | unmanaged (``is-managed=false``), as this alone |
+ | | | will :ref:`not block any configured monitoring |
+ | | | `. Disabling the operation |
+ | | | does not suppress all actions of the given type. |
+ | | | Allowed values: ``true``, ``false``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | record-pending | TRUE | .. index:: |
+ | | | single: record-pending; action property |
+ | | | single: action; property, record-pending |
+ | | | |
+ | | | If ``true``, the intention to perform the operation |
+ | | | is recorded so that GUIs and CLI tools can indicate |
+ | | | that an operation is in progress. This is best set |
+ | | | as an *operation default* |
+ | | | (see :ref:`s-operation-defaults`). Allowed values: |
+ | | | ``true``, ``false``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | role | | .. index:: |
+ | | | single: role; action property |
+ | | | single: action; property, role |
+ | | | |
+ | | | Run the operation only on node(s) that the cluster |
+ | | | thinks should be in the specified role. This only |
+ | | | makes sense for recurring ``monitor`` operations. |
+ | | | Allowed (case-sensitive) values: ``Stopped``, |
+ | | | ``Started``, and in the case of :ref:`promotable |
+ | | | clone resources `, |
+ | | | ``Unpromoted`` and ``Promoted``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+
+.. note::
+
+ When ``on-fail`` is set to ``demote``, recovery from failure by a successful
+ demote causes the cluster to recalculate whether and where a new instance
+ should be promoted. The node with the failure is eligible, so if promotion
+ scores have not changed, it will be promoted again.
+
+ There is no direct equivalent of ``migration-threshold`` for the promoted
+ role, but the same effect can be achieved with a location constraint using a
+ :ref:`rule <rules>` with a node attribute expression for the resource's fail
+ count.
+
+ For example, to immediately ban the promoted role from a node with any
+ failed promote or promoted instance monitor:
+
+ .. code-block:: xml
+
+      <rsc_location id="loc1" rsc="my_primitive-clone">
+          <rule id="rule1" score="-INFINITY" role="Promoted" boolean-op="or">
+            <expression id="expr1" attribute="fail-count-my_primitive#promote_0"
+              operation="defined"/>
+            <expression id="expr2" attribute="fail-count-my_primitive#monitor_10000"
+              operation="defined"/>
+          </rule>
+      </rsc_location>
+
+ This example assumes that there is a promotable clone of the ``my_primitive``
+ resource (note that the primitive name, not the clone name, is used in the
+ rule), and that there is a recurring 10-second-interval monitor configured for
+ the promoted role (fail count attributes specify the interval in
+ milliseconds).
+
+.. _s-resource-monitoring:
+
+Monitoring Resources for Failure
+################################
+
+When Pacemaker first starts a resource, it runs one-time ``monitor`` operations
+(referred to as *probes*) to ensure the resource is running where it's
+supposed to be, and not running where it's not supposed to be. (This behavior
+can be affected by the ``resource-discovery`` location constraint property.)
+
+Other than those initial probes, Pacemaker will *not* (by default) check that
+the resource continues to stay healthy [#]_. You must configure ``monitor``
+operations explicitly to perform these checks.
+
+.. topic:: An OCF resource with a recurring health check
+
+ .. code-block:: xml
+
+      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+        <operations>
+           <op id="Public-IP-start" name="start" timeout="60s"/>
+           <op id="Public-IP-monitor" name="monitor" interval="60s"/>
+        </operations>
+        <instance_attributes id="params-public-ip">
+           <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+        </instance_attributes>
+      </primitive>
+
+By default, a ``monitor`` operation will ensure that the resource is running
+where it is supposed to. The ``target-role`` property can be used for further
+checking.
+
+For example, if a resource has one ``monitor`` operation with
+``interval=10 role=Started`` and a second ``monitor`` operation with
+``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes
+it thinks *should* be running the resource, and the second monitor on any nodes
+that it thinks *should not* be running the resource (for the truly paranoid,
+who want to know when an administrator manually starts a service by mistake).
+
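+As a sketch (with a hypothetical resource and IDs), such a pair of operations
+could be configured as:
+
+.. code-block:: xml
+
+   <op id="my-rsc-monitor-started" name="monitor" interval="10s" role="Started"/>
+   <op id="my-rsc-monitor-stopped" name="monitor" interval="11s" role="Stopped"/>
+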
+.. note::
+
+ Currently, monitors with ``role=Stopped`` are not implemented for
+ :ref:`clone <s-resource-clone>` resources.
+
+.. _s-monitoring-unmanaged:
+
+Monitoring Resources When Administration is Disabled
+####################################################
+
+Recurring ``monitor`` operations behave differently under various administrative
+settings:
+
+* When a resource is unmanaged (by setting ``is-managed=false``): No monitors
+ will be stopped.
+
+ If the unmanaged resource is stopped on a node where the cluster thinks it
+ should be running, the cluster will detect and report that it is not, but it
+ will not consider the monitor failed, and will not try to start the resource
+ until it is managed again.
+
+ Starting the unmanaged resource on a different node is strongly discouraged
+ and will at least cause the cluster to consider the resource failed, and
+ may require the resource's ``target-role`` to be set to ``Stopped`` then
+ ``Started`` to be recovered.
+
+* When a resource is put into maintenance mode (by setting
+ ``maintenance=true``): The resource will be marked as unmanaged. (This
+ overrides ``is-managed=true``.)
+
+ Additionally, all monitor operations will be stopped, except those specifying
+ ``role`` as ``Stopped`` (which will be newly initiated if appropriate). As
+ with unmanaged resources in general, starting a resource on a node other than
+ where the cluster expects it to be will cause problems.
+
+* When a node is put into standby: All resources will be moved away from the
+ node, and all ``monitor`` operations will be stopped on the node, except those
+ specifying ``role`` as ``Stopped`` (which will be newly initiated if
+ appropriate).
+
+* When a node is put into maintenance mode: All resources that are active on the
+ node will be marked as in maintenance mode. See above for more details.
+
+* When the cluster is put into maintenance mode: All resources in the cluster
+ will be marked as in maintenance mode. See above for more details.
+
+A resource is in maintenance mode if the cluster, the node where the resource
+is active, or the resource itself is configured to be in maintenance mode. If a
+resource is in maintenance mode, then it is also unmanaged. However, if a
+resource is unmanaged, it is not necessarily in maintenance mode.
+
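+As an illustrative sketch (hypothetical resource and IDs), the resource-level
+variant is just a resource meta-attribute:
+
+.. code-block:: xml
+
+   <primitive id="my-rsc" class="ocf" provider="heartbeat" type="Dummy">
+     <meta_attributes id="my-rsc-meta">
+       <nvpair id="my-rsc-maintenance" name="maintenance" value="true"/>
+     </meta_attributes>
+   </primitive>
+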
+.. _s-operation-defaults:
+
+Setting Global Defaults for Operations
+######################################
+
+You can change the global default values for operation properties
+in a given cluster. These are defined in an ``op_defaults`` section
+of the CIB's ``configuration`` section, and can be set with
+``crm_attribute``. For example,
+
+.. code-block:: none
+
+ # crm_attribute --type op_defaults --name timeout --update 20s
+
+would default each operation's ``timeout`` to 20 seconds. If an
+operation's definition also includes a value for ``timeout``, then that
+value would be used for that operation instead.
+
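+The command above maintains the value as an ``nvpair`` in the CIB's
+``op_defaults`` section; a sketch of the result (hypothetical IDs) looks like:
+
+.. code-block:: xml
+
+   <op_defaults>
+     <meta_attributes id="op_defaults-options">
+       <nvpair id="op_defaults-timeout" name="timeout" value="20s"/>
+     </meta_attributes>
+   </op_defaults>
+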
+When Implicit Operations Take a Long Time
+#########################################
+
+The cluster will always perform a number of implicit operations: ``start``,
+``stop`` and a non-recurring ``monitor`` operation used at startup to check
+whether the resource is already active. If one of these is taking too long,
+then you can create an entry for them and specify a longer timeout.
+
+.. topic:: An OCF resource with custom timeouts for its implicit actions
+
+ .. code-block:: xml
+
+      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+        <operations>
+           <op id="public-ip-startup" name="monitor" interval="0" timeout="90s"/>
+           <op id="public-ip-start" name="start" interval="0" timeout="180s"/>
+           <op id="public-ip-stop" name="stop" interval="0" timeout="15min"/>
+        </operations>
+        <instance_attributes id="params-public-ip">
+           <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+        </instance_attributes>
+      </primitive>
+
+Multiple Monitor Operations
+###########################
+
+Provided no two operations (for a single resource) have the same name
+and interval, you can have as many ``monitor`` operations as you like.
+In this way, you can do a superficial health check every minute and
+progressively more intense ones at longer intervals.
+
+To tell the resource agent what kind of check to perform, you need to
+provide each monitor with a different value for a common parameter.
+The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL``
+for this purpose and dictates that it is "made available to the
+resource agent without the normal ``OCF_RESKEY`` prefix".
+
+Whatever name you choose, you can specify it by adding an
+``instance_attributes`` block to the ``op`` tag. It is up to each
+resource agent to look for the parameter and decide how to use it.
+
+.. topic:: An OCF resource with two recurring health checks, performing
+ different levels of checks specified via ``OCF_CHECK_LEVEL``.
+
+ .. code-block:: xml
+
+      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+        <operations>
+           <op id="public-ip-health-60" name="monitor" interval="60">
+              <instance_attributes id="params-public-ip-depth-60">
+                 <nvpair id="public-ip-depth-60" name="OCF_CHECK_LEVEL" value="10"/>
+              </instance_attributes>
+           </op>
+           <op id="public-ip-health-300" name="monitor" interval="300">
+              <instance_attributes id="params-public-ip-depth-300">
+                 <nvpair id="public-ip-depth-300" name="OCF_CHECK_LEVEL" value="20"/>
+              </instance_attributes>
+           </op>
+        </operations>
+        <instance_attributes id="params-public-ip">
+           <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+        </instance_attributes>
+      </primitive>
+
+Disabling a Monitor Operation
+#############################
+
+The easiest way to stop a recurring monitor is to just delete it.
+However, there can be times when you only want to disable it
+temporarily. In such cases, simply add ``enabled=false`` to the
+operation's definition.
+
+.. topic:: Example of an OCF resource with a disabled health check
+
+ .. code-block:: xml
+
+      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+        <operations>
+           <op id="public-ip-check" name="monitor" interval="60s" enabled="false"/>
+        </operations>
+        <instance_attributes id="params-public-ip">
+           <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+        </instance_attributes>
+      </primitive>
+
+This can be achieved from the command line by executing:
+
+.. code-block:: none
+
+ # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="false"/>'
+
+Once you've done whatever you needed to do, you can then re-enable it with
+
+.. code-block:: none
+
+ # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="true"/>'
+
+.. [#] Currently, anyway. Automatic monitoring operations may be added in a future
+ version of Pacemaker.
diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst
index 3b7520febc..de511becff 100644
--- a/doc/sphinx/Pacemaker_Explained/resources.rst
+++ b/doc/sphinx/Pacemaker_Explained/resources.rst
@@ -1,1074 +1,688 @@
.. _resource:
Cluster Resources
-----------------
.. _s-resource-primitive:
What is a Cluster Resource?
###########################
.. index::
single: resource
A *resource* is a service managed by Pacemaker. The simplest type of resource,
a *primitive*, is described in this chapter. More complex forms, such as groups
and clones, are described in later chapters.
Every primitive has a *resource agent* that provides Pacemaker a standardized
interface for managing the service. This allows Pacemaker to be agnostic about
the services it manages. Pacemaker doesn't need to understand how the service
works because it relies on the resource agent to do the right thing when asked.
Every resource has a *class* specifying the standard that its resource agent
follows, and a *type* identifying the specific service being managed.
.. _s-resource-supported:
.. index::
single: resource; class
Resource Classes
################
Pacemaker supports several classes, or standards, of resource agents:
* OCF
* LSB
* Systemd
* Service
* Fencing
* Nagios *(deprecated since 2.1.6)*
* Upstart *(deprecated since 2.1.0)*
.. index::
single: resource; OCF
single: OCF; resources
single: Open Cluster Framework; resources
Open Cluster Framework
______________________
The Open Cluster Framework (OCF) Resource Agent API is a ClusterLabs
standard for managing services. It is the preferred class, since it is
specifically designed for use in a Pacemaker cluster.
OCF agents are scripts that support a variety of actions including ``start``,
``stop``, and ``monitor``. They may accept parameters, making them more
flexible than other classes. The number and purpose of parameters is left to
the agent, which advertises them via the ``meta-data`` action.
Unlike other classes, OCF agents have a *provider* as well as a class and type.
For more information, see the "Resource Agents" chapter of *Pacemaker
Administration* and the `OCF standard
`_.
.. _s-resource-supported-systemd:
.. index::
single: Resource; Systemd
single: Systemd; resources
Systemd
_______
Most Linux distributions use `Systemd
`_ for system initialization
and service management. *Unit files* specify how to manage services and are
usually provided by the distribution.
Pacemaker can manage systemd services. Simply create a resource with
``systemd`` as the resource class and the unit file name as the resource type.
Do *not* run ``systemctl enable`` on the unit.
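
For example, a resource for a unit named ``httpd.service`` (a hypothetical
name here) could be defined as:

.. code-block:: xml

   <primitive id="web-server" class="systemd" type="httpd"/>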
.. important::
Make sure that any systemd services to be controlled by the cluster are
*not* enabled to start at boot.
.. index::
single: resource; LSB
single: LSB; resources
single: Linux Standard Base; resources
Linux Standard Base
___________________
*LSB* resource agents, also known as `SysV-style
`_, are scripts that
provide start, stop, and status actions for a service.
They are provided by some operating system distributions. If a full path is not
given, they are assumed to be located in a directory specified when your
Pacemaker software was built (usually ``/etc/init.d``).
In order to be used with Pacemaker, they must conform to the `LSB specification
`_
as it relates to init scripts.
.. warning::
Some LSB scripts do not fully comply with the standard. For details on how
to check whether your script is LSB-compatible, see the "Resource Agents"
chapter of `Pacemaker Administration`. Common problems include:
* Not implementing the ``status`` action
* Not observing the correct exit status codes
* Starting a started resource returns an error
* Stopping a stopped resource returns an error
.. important::
Make sure the host is *not* configured to start any LSB services at boot
that will be controlled by the cluster.
.. index::
single: Resource; System Services
single: System Service; resources
System Services
_______________
Since there are various types of system services (``systemd``,
``upstart``, and ``lsb``), Pacemaker supports a special ``service`` alias which
intelligently figures out which one applies to a given cluster node.
This is particularly useful when the cluster contains a mix of
``systemd``, ``upstart``, and ``lsb``.
In order, Pacemaker will try to find the named service as:
* an LSB init script
* a Systemd unit file
* an Upstart job
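
For example, the following resource (the ``exim`` service name is just an
illustration) would be managed by whichever of those mechanisms provides it
on a given node:

.. code-block:: xml

   <primitive id="mail" class="service" type="exim"/>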
.. index::
single: Resource; STONITH
single: STONITH; resources
STONITH
_______
The ``stonith`` class is used for managing fencing devices, discussed later in
:ref:`fencing`.
.. index::
single: Resource; Nagios Plugins
single: Nagios Plugins; resources
Nagios Plugins
______________
Nagios Plugins are a way to monitor services. Pacemaker can use these as
resources, to react to a change in the service's status.
To use plugins as resources, Pacemaker must have been built with support, and
OCF-style meta-data for the plugins must be installed on nodes that can run
them. Meta-data for several common plugins is provided by the
`nagios-agents-metadata `_
project.
The supported parameters for such a resource are the same as the plugin's
long options.
Start and monitor actions for plugin resources are implemented as invoking the
plugin. A plugin result of "OK" (0) is treated as success, a result of "WARN"
(1) is treated as a successful but degraded service, and any other result is
considered a failure.
A plugin resource's status will not change just because the plugin is
restarted, so recovery by restart accomplishes nothing; using a plugin
resource alone therefore does not make sense with ``on-fail`` set to (or left
at its default of) ``restart``. Another value could make sense, for example,
if you want to fence or put in standby any nodes that cannot reach some
external service.
A more common use case for plugin resources is to configure them with a
``container`` meta-attribute set to the name of another resource that actually
makes the service available, such as a virtual machine or container.
With ``container`` set, the plugin resource will automatically be colocated
with the containing resource and ordered after it, and the containing resource
will be considered failed if the plugin resource fails. This allows monitoring
of a service inside a virtual machine or container, with recovery of the
virtual machine or container if the service fails.
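
As a sketch of such a configuration (the plugin choice, parameter, and IDs
are examples only), a ``check_ssh`` plugin monitoring a service inside a
virtual machine managed by a ``my-vm`` resource might look like:

.. code-block:: xml

   <primitive id="vm-sshd" class="nagios" type="check_ssh">
     <instance_attributes id="vm-sshd-params">
       <nvpair id="vm-sshd-hostname" name="hostname" value="192.0.2.10"/>
     </instance_attributes>
     <meta_attributes id="vm-sshd-meta">
       <nvpair id="vm-sshd-container" name="container" value="my-vm"/>
     </meta_attributes>
   </primitive>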
.. warning::
Nagios support is deprecated in Pacemaker. Support will be dropped entirely
at the next major release of Pacemaker.
For monitoring a service inside a virtual machine or container, the
recommended alternative is to configure the virtual machine as a guest node
or the container as a :ref:`bundle `. For other use
cases, or when the virtual machine or container image cannot be modified,
the recommended alternative is to write a custom OCF agent for the service
(which may even call the Nagios plugin as part of its status action).
.. index::
single: Resource; Upstart
single: Upstart; resources
Upstart
_______
Some Linux distributions previously used `Upstart
`_ for system initialization and service
management. Pacemaker is able to manage services using Upstart if the local
system supports them and support was enabled when your Pacemaker software was
built.
The *jobs* that specify how services are managed are usually provided by the
operating system distribution.
.. important::
Make sure the host is *not* configured to start any Upstart services at boot
that will be controlled by the cluster.
.. warning::
Upstart support is deprecated in Pacemaker. Upstart is no longer actively
maintained, and test platforms for it are no longer readily usable. Support
will be dropped entirely at the next major release of Pacemaker.
.. _primitive-resource:
Resource Properties
###################
These values tell the cluster which resource agent to use for the resource,
where to find that resource agent and what standards it conforms to.
.. table:: **Properties of a Primitive Resource**
:widths: 1 4
+-------------+------------------------------------------------------------------+
| Field | Description |
+=============+==================================================================+
| id | .. index:: |
| | single: id; resource |
| | single: resource; property, id |
| | |
| | Your name for the resource |
+-------------+------------------------------------------------------------------+
| class | .. index:: |
| | single: class; resource |
| | single: resource; property, class |
| | |
| | The standard the resource agent conforms to. Allowed values: |
| | ``lsb``, ``ocf``, ``service``, ``stonith``, ``systemd``, |
| | ``nagios`` *(deprecated since 2.1.6)*, and ``upstart`` |
| | *(deprecated since 2.1.0)* |
+-------------+------------------------------------------------------------------+
| description | .. index:: |
| | single: description; resource |
| | single: resource; property, description |
| | |
| | A description of the Resource Agent, intended for local use. |
| | E.g. ``IP address for website`` |
+-------------+------------------------------------------------------------------+
| type | .. index:: |
| | single: type; resource |
| | single: resource; property, type |
| | |
| | The name of the Resource Agent you wish to use. E.g. |
| | ``IPaddr`` or ``Filesystem`` |
+-------------+------------------------------------------------------------------+
| provider | .. index:: |
| | single: provider; resource |
| | single: resource; property, provider |
| | |
| | The OCF spec allows multiple vendors to supply the same resource |
| | agent. To use the OCF resource agents supplied by the Heartbeat |
| | project, you would specify ``heartbeat`` here. |
+-------------+------------------------------------------------------------------+
The XML definition of a resource can be queried with the **crm_resource** tool.
For example:
.. code-block:: none
# crm_resource --resource Email --query-xml
might produce:
.. topic:: A system resource definition
   .. code-block:: xml

      <primitive id="Email" class="service" type="exim"/>

.. note::
   One of the main drawbacks of system service (LSB, systemd or
   Upstart) resources is that they do not allow any parameters.
.. topic:: An OCF resource definition
   .. code-block:: xml

      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
         <instance_attributes id="Public-IP-params">
            <nvpair id="Public-IP-ip" name="ip" value="192.0.2.2"/>
         </instance_attributes>
      </primitive>

.. _resource_options:
Resource Options
################
Resources have two types of options: *meta-attributes* and *instance attributes*.
Meta-attributes apply to any type of resource, while instance attributes
are specific to each resource agent.
Resource Meta-Attributes
________________________
Meta-attributes are used by the cluster to decide how a resource should
behave and can be easily set using the ``--meta`` option of the
**crm_resource** command.
.. table:: **Meta-attributes of a Primitive Resource**
:class: longtable
:widths: 2 2 3
+----------------------------+----------------------------------+------------------------------------------------------+
| Field | Default | Description |
+============================+==================================+======================================================+
| priority | 0 | .. index:: |
| | | single: priority; resource option |
| | | single: resource; option, priority |
| | | |
| | | If not all resources can be active, the cluster |
| | | will stop lower priority resources in order to |
| | | keep higher priority ones active. |
+----------------------------+----------------------------------+------------------------------------------------------+
| critical | true | .. index:: |
| | | single: critical; resource option |
| | | single: resource; option, critical |
| | | |
| | | Use this value as the default for ``influence`` in |
| | | all :ref:`colocation constraints |
| | | ` involving this resource, |
| | | as well as the implicit colocation constraints |
| | | created if this resource is in a :ref:`group |
| | | `. For details, see |
| | | :ref:`s-coloc-influence`. *(since 2.1.0)* |
+----------------------------+----------------------------------+------------------------------------------------------+
| target-role | Started | .. index:: |
| | | single: target-role; resource option |
| | | single: resource; option, target-role |
| | | |
| | | What state should the cluster attempt to keep this |
| | | resource in? Allowed values: |
| | | |
| | | * ``Stopped:`` Force the resource to be stopped |
| | | * ``Started:`` Allow the resource to be started |
| | | (and in the case of :ref:`promotable clone |
| | | resources `, promoted |
| | | if appropriate) |
| | | * ``Unpromoted:`` Allow the resource to be started, |
| | | but only in the unpromoted role if the resource is |
| | | :ref:`promotable ` |
| | | * ``Promoted:`` Equivalent to ``Started`` |
+----------------------------+----------------------------------+------------------------------------------------------+
| is-managed | TRUE | .. index:: |
| | | single: is-managed; resource option |
| | | single: resource; option, is-managed |
| | | |
| | | Is the cluster allowed to start and stop |
| | | the resource? Allowed values: ``true``, ``false`` |
+----------------------------+----------------------------------+------------------------------------------------------+
| maintenance | FALSE | .. index:: |
| | | single: maintenance; resource option |
| | | single: resource; option, maintenance |
| | | |
| | | Similar to the ``maintenance-mode`` |
| | | :ref:`cluster option `, but for |
| | | a single resource. If true, the resource will not |
| | | be started, stopped, or monitored on any node. This |
| | | differs from ``is-managed`` in that monitors will |
| | | not be run. Allowed values: ``true``, ``false`` |
+----------------------------+----------------------------------+------------------------------------------------------+
| resource-stickiness | 1 for individual clone | .. _resource-stickiness: |
| | instances, 0 for all | |
| | other resources | .. index:: |
| | | single: resource-stickiness; resource option |
| | | single: resource; option, resource-stickiness |
| | | |
| | | A score that will be added to the current node when |
| | | a resource is already active. This allows running |
| | | resources to stay where they are, even if they |
| | | would be placed elsewhere if they were being |
| | | started from a stopped state. |
+----------------------------+----------------------------------+------------------------------------------------------+
| requires | ``quorum`` for resources | .. _requires: |
| | with a ``class`` of ``stonith``, | |
| | otherwise ``unfencing`` if | .. index:: |
| | unfencing is active in the | single: requires; resource option |
| | cluster, otherwise ``fencing`` | single: resource; option, requires |
| | if ``stonith-enabled`` is true, | |
| | otherwise ``quorum`` | Conditions under which the resource can be |
| | | started. Allowed values: |
| | | |
| | | * ``nothing:`` can always be started |
| | | * ``quorum:`` The cluster can only start this |
| | | resource if a majority of the configured nodes |
| | | are active |
| | | * ``fencing:`` The cluster can only start this |
| | | resource if a majority of the configured nodes |
| | | are active *and* any failed or unknown nodes |
| | | have been :ref:`fenced ` |
| | | * ``unfencing:`` The cluster can only start this |
| | | resource if a majority of the configured nodes |
| | | are active *and* any failed or unknown nodes have |
| | | been fenced *and* only on nodes that have been |
| | | :ref:`unfenced ` |
+----------------------------+----------------------------------+------------------------------------------------------+
| migration-threshold | INFINITY | .. index:: |
| | | single: migration-threshold; resource option |
| | | single: resource; option, migration-threshold |
| | | |
| | | How many failures may occur for this resource on |
| | | a node, before this node is marked ineligible to |
| | | host this resource. A value of 0 indicates that this |
| | | feature is disabled (the node will never be marked |
|                            |                                  | ineligible); by contrast, the cluster treats         |
| | | INFINITY (the default) as a very large but finite |
| | | number. This option has an effect only if the |
| | | failed operation specifies ``on-fail`` as |
| | | ``restart`` (the default), and additionally for |
| | | failed ``start`` operations, if the cluster |
| | | property ``start-failure-is-fatal`` is ``false``. |
+----------------------------+----------------------------------+------------------------------------------------------+
| failure-timeout | 0 | .. index:: |
| | | single: failure-timeout; resource option |
| | | single: resource; option, failure-timeout |
| | | |
| | | How many seconds to wait before acting as if the |
| | | failure had not occurred, and potentially allowing |
| | | the resource back to the node on which it failed. |
| | | A value of 0 indicates that this feature is |
| | | disabled. |
+----------------------------+----------------------------------+------------------------------------------------------+
| multiple-active | stop_start | .. index:: |
| | | single: multiple-active; resource option |
| | | single: resource; option, multiple-active |
| | | |
| | | What should the cluster do if it ever finds the |
| | | resource active on more than one node? Allowed |
| | | values: |
| | | |
| | | * ``block``: mark the resource as unmanaged |
| | | * ``stop_only``: stop all active instances and |
| | | leave them that way |
| | | * ``stop_start``: stop all active instances and |
| | | start the resource in one location only |
| | | * ``stop_unexpected``: stop all active instances |
| | | except where the resource should be active (this |
| | | should be used only when extra instances are not |
| | | expected to disrupt existing instances, and the |
| | | resource agent's monitor of an existing instance |
| | | is capable of detecting any problems that could be |
| | | caused; note that any resources ordered after this |
| | | will still need to be restarted) *(since 2.1.3)* |
+----------------------------+----------------------------------+------------------------------------------------------+
| allow-migrate | TRUE for ocf:pacemaker:remote | Whether the cluster should try to "live migrate" |
| | resources, FALSE otherwise | this resource when it needs to be moved (see |
| | | :ref:`live-migration`) |
+----------------------------+----------------------------------+------------------------------------------------------+
| allow-unhealthy-nodes | FALSE | Whether the resource should be able to run on a node |
| | | even if the node's health score would otherwise |
| | | prevent it (see :ref:`node-health`) *(since 2.1.3)* |
+----------------------------+----------------------------------+------------------------------------------------------+
| container-attribute-target | | Specific to bundle resources; see |
| | | :ref:`s-bundle-attributes` |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-node | | The name of the Pacemaker Remote guest node this |
| | | resource is associated with, if any. If |
| | | specified, this both enables the resource as a |
| | | guest node and defines the unique name used to |
| | | identify the guest node. The guest must be |
| | | configured to run the Pacemaker Remote daemon |
| | | when it is started. **WARNING:** This value |
| | | cannot overlap with any resource or node IDs. |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-port | 3121 | If ``remote-node`` is specified, the port on the |
| | | guest used for its Pacemaker Remote connection. |
| | | The Pacemaker Remote daemon on the guest must |
| | | be configured to listen on this port. |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-addr | value of ``remote-node`` | If ``remote-node`` is specified, the IP |
| | | address or hostname used to connect to the |
| | | guest via Pacemaker Remote. The Pacemaker Remote |
| | | daemon on the guest must be configured to accept |
| | | connections on this address. |
+----------------------------+----------------------------------+------------------------------------------------------+
| remote-connect-timeout | 60s | If ``remote-node`` is specified, how long before |
| | | a pending guest connection will time out. |
+----------------------------+----------------------------------+------------------------------------------------------+
As an example of setting resource options, if you performed the following
commands on an LSB Email resource:
.. code-block:: none
# crm_resource --meta --resource Email --set-parameter priority --parameter-value 100
# crm_resource -m -r Email -p multiple-active -v block
the resulting resource definition might be:
.. topic:: An LSB resource with cluster options
   .. code-block:: xml

      <primitive id="Email" class="lsb" type="exim">
        <meta_attributes id="meta-email">
          <nvpair id="email-priority" name="priority" value="100"/>
          <nvpair id="email-active" name="multiple-active" value="block"/>
        </meta_attributes>
      </primitive>

In addition to the cluster-defined meta-attributes described above, you may
also configure arbitrary meta-attributes of your own choosing. Most commonly,
this would be done for use in :ref:`rules `. For example, an IT department
might define a custom meta-attribute to indicate which company department each
resource is intended for. To reduce the chance of name collisions with
cluster-defined meta-attributes added in the future, it is recommended to use
a unique, organization-specific prefix for such attributes.
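
For instance, such a custom meta-attribute (all names here are invented for
illustration) might look like:

.. code-block:: xml

   <meta_attributes id="email-meta">
     <nvpair id="email-dept" name="examplecorp-department" value="accounting"/>
   </meta_attributes>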
.. _s-resource-defaults:
Setting Global Defaults for Resource Meta-Attributes
____________________________________________________
To set a default value for a resource option, add it to the
``rsc_defaults`` section with ``crm_attribute``. For example,
.. code-block:: none
# crm_attribute --type rsc_defaults --name is-managed --update false
would prevent the cluster from starting or stopping any of the
resources in the configuration (unless of course the individual
resources were specifically enabled by having their ``is-managed`` set to
``true``).
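
For illustration, the equivalent CIB XML would be along these lines (the
``id`` values are arbitrary examples):

.. code-block:: xml

   <rsc_defaults>
     <meta_attributes id="rsc_defaults-options">
       <nvpair id="rsc_defaults-is-managed" name="is-managed" value="false"/>
     </meta_attributes>
   </rsc_defaults>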
Resource Instance Attributes
____________________________
The resource agents of some resource classes (``lsb``, ``systemd``, and
``upstart`` *not* among them) can be given parameters, which determine how
they behave and which instance of a service they control.
If your resource agent supports parameters, you can add them with the
``crm_resource`` command. For example,
.. code-block:: none
# crm_resource --resource Public-IP --set-parameter ip --parameter-value 192.0.2.2
would create an entry in the resource like this:
.. topic:: An example OCF resource with instance attributes
   .. code-block:: xml

      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
         <instance_attributes id="params-public-ip">
            <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
         </instance_attributes>
      </primitive>

For an OCF resource, the result would be an environment variable
called ``OCF_RESKEY_ip`` with a value of ``192.0.2.2``.
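
Inside the agent, such parameters are read from the environment. A minimal
sketch (not taken from any shipped agent) might be:

.. code-block:: sh

   # Fall back to a default if the parameter was not configured
   ip="${OCF_RESKEY_ip:-127.0.0.1}"
   echo "Managing address ${ip}"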
The list of instance attributes supported by an OCF resource agent can be
found by calling the resource agent with the ``meta-data`` command.
The output contains an XML description of all the supported
attributes, their purpose and default values.
.. topic:: Displaying the metadata for the Dummy resource agent template
.. code-block:: none
# export OCF_ROOT=/usr/lib/ocf
# $OCF_ROOT/resource.d/pacemaker/Dummy meta-data
.. code-block:: xml
      <?xml version="1.0"?>
      <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
      <resource-agent name="Dummy" version="2.0">
        <version>1.1</version>
        <longdesc lang="en">
          This is a dummy OCF resource agent. It does absolutely nothing except keep track
          of whether it is running or not, and can be configured so that actions fail or
          take a long time. Its purpose is primarily for testing, and to serve as a
          template for resource agent writers.
        </longdesc>
        <shortdesc lang="en">Example stateless resource agent</shortdesc>
        <parameters>
          <parameter name="state" unique-group="state">
            <longdesc lang="en">Location to store the resource state in.</longdesc>
            <shortdesc lang="en">State file</shortdesc>
            <content type="string" default="/var/run/Dummy-RESOURCE_ID.state" />
          </parameter>
          <parameter name="passwd" reloadable="1">
            <longdesc lang="en">Fake password field</longdesc>
            <shortdesc lang="en">Password</shortdesc>
            <content type="string" default="" />
          </parameter>
          <parameter name="fake" reloadable="1">
            <longdesc lang="en">Fake attribute that can be changed to cause a reload</longdesc>
            <shortdesc lang="en">Fake attribute that can be changed to cause a reload</shortdesc>
            <content type="string" default="dummy" />
          </parameter>
          <parameter name="op_sleep" reloadable="1">
            <longdesc lang="en">Number of seconds to sleep during operations. This can be used to test how
            the cluster reacts to operation timeouts.</longdesc>
            <shortdesc lang="en">Operation sleep duration in seconds.</shortdesc>
            <content type="string" default="0" />
          </parameter>
          <parameter name="fail_start_on" reloadable="1">
            <longdesc lang="en">Start, migrate_from, and reload-agent actions will return failure if running on
            the host specified here, but the resource will run successfully anyway (future
            monitor calls will find it running). This can be used to test on-fail=ignore.</longdesc>
            <shortdesc lang="en">Report bogus start failure on specified host</shortdesc>
            <content type="string" default="" />
          </parameter>
          <parameter name="envfile" reloadable="1">
            <longdesc lang="en">If this is set, the environment will be dumped to this file for every call.</longdesc>
            <shortdesc lang="en">Environment dump file</shortdesc>
            <content type="string" default="" />
          </parameter>
        </parameters>
        <actions>
          <action name="start"        timeout="20s" />
          <action name="stop"         timeout="20s" />
          <action name="monitor"      timeout="20s" interval="10s" depth="0" />
          <action name="reload"       timeout="20s" />
          <action name="reload-agent" timeout="20s" />
          <action name="migrate_to"   timeout="20s" />
          <action name="migrate_from" timeout="20s" />
          <action name="validate-all" timeout="20s" />
          <action name="meta-data"    timeout="5s" />
        </actions>
      </resource-agent>
-
-.. index::
- single: resource; action
- single: resource; operation
-
-.. _operation:
-
-Resource Operations
-###################
-
-*Operations* are actions the cluster can perform on a resource by calling the
-resource agent. Resource agents must support certain common operations such as
-start, stop, and monitor, and may implement any others.
-
-Operations may be explicitly configured for two purposes: to override defaults
-for options (such as timeout) that the cluster will use whenever it initiates
-the operation, and to run an operation on a recurring basis (for example, to
-monitor the resource for failure).
-
-.. topic:: An OCF resource with a non-default start timeout
-
- .. code-block:: xml
-
-
-
-
-
-
-
-
-
-
-Pacemaker identifies operations by a combination of name and interval, so this
-combination must be unique for each resource. That is, you should not configure
-two operations for the same resource with the same name and interval.
-
-.. _operation_properties:
-
-Operation Properties
-____________________
-
-Operation properties may be specified directly in the ``op`` element as
-XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements.
-XML attributes take precedence over ``nvpair`` elements if both are specified.
-
-.. table:: **Properties of an Operation**
- :class: longtable
- :widths: 1 2 3
-
- +----------------+-----------------------------------+-----------------------------------------------------+
- | Field | Default | Description |
- +================+===================================+=====================================================+
- | id | | .. index:: |
- | | | single: id; action property |
- | | | single: action; property, id |
- | | | |
- | | | A unique name for the operation. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | name | | .. index:: |
- | | | single: name; action property |
- | | | single: action; property, name |
- | | | |
- | | | The action to perform. This can be any action |
- | | | supported by the agent; common values include |
- | | | ``monitor``, ``start``, and ``stop``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | interval | 0 | .. index:: |
- | | | single: interval; action property |
- | | | single: action; property, interval |
- | | | |
- | | | How frequently (in seconds) to perform the |
- | | | operation. A value of 0 means "when needed". |
- | | | A positive value defines a *recurring action*, |
- | | | which is typically used with |
- | | | :ref:`monitor `. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | timeout | | .. index:: |
- | | | single: timeout; action property |
- | | | single: action; property, timeout |
- | | | |
- | | | How long to wait before declaring the action |
- | | | has failed |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | on-fail | Varies by action: | .. index:: |
- | | | single: on-fail; action property |
- | | * ``stop``: ``fence`` if | single: action; property, on-fail |
- | | ``stonith-enabled`` is true | |
- | | or ``block`` otherwise | The action to take if this action ever fails. |
- | | * ``demote``: ``on-fail`` of the | Allowed values: |
- | | ``monitor`` action with | |
- | | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. |
- | | if present, enabled, and | * ``block:`` Don't perform any further operations |
- | | configured to a value other | on the resource. |
- | | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start |
- | | otherwise | it elsewhere. |
- | | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a |
- | | | full restart. This is valid only for ``promote`` |
- | | | actions, and for ``monitor`` actions with both |
- | | | a nonzero ``interval`` and ``role`` set to |
- | | | ``Promoted``; for any other action, a |
- | | | configuration error will be logged, and the |
- | | | default behavior will be used. *(since 2.0.5)* |
- | | | * ``restart:`` Stop the resource and start it |
- | | | again (possibly on a different node). |
- | | | * ``fence:`` STONITH the node on which the |
- | | | resource failed. |
- | | | * ``standby:`` Move *all* resources away from the |
- | | | node on which the resource failed. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | enabled | TRUE | .. index:: |
- | | | single: enabled; action property |
- | | | single: action; property, enabled |
- | | | |
- | | | If ``false``, ignore this operation definition. |
- | | | This is typically used to pause a particular |
- | | | recurring ``monitor`` operation; for instance, it |
- | | | can complement the respective resource being |
- | | | unmanaged (``is-managed=false``), as this alone |
- | | | will :ref:`not block any configured monitoring |
- | | | `. Disabling the operation |
- | | | does not suppress all actions of the given type. |
- | | | Allowed values: ``true``, ``false``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | record-pending | TRUE | .. index:: |
- | | | single: record-pending; action property |
- | | | single: action; property, record-pending |
- | | | |
- | | | If ``true``, the intention to perform the operation |
- | | | is recorded so that GUIs and CLI tools can indicate |
- | | | that an operation is in progress. This is best set |
- | | | as an *operation default* |
- | | | (see :ref:`s-operation-defaults`). Allowed values: |
- | | | ``true``, ``false``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | role | | .. index:: |
- | | | single: role; action property |
- | | | single: action; property, role |
- | | | |
- | | | Run the operation only on node(s) that the cluster |
- | | | thinks should be in the specified role. This only |
- | | | makes sense for recurring ``monitor`` operations. |
- | | | Allowed (case-sensitive) values: ``Stopped``, |
- | | | ``Started``, and in the case of :ref:`promotable |
- | | | clone resources `, |
- | | | ``Unpromoted`` and ``Promoted``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
-
-.. note::
-
- When ``on-fail`` is set to ``demote``, recovery from failure by a successful
- demote causes the cluster to recalculate whether and where a new instance
- should be promoted. The node with the failure is eligible, so if promotion
- scores have not changed, it will be promoted again.
-
- There is no direct equivalent of ``migration-threshold`` for the promoted
- role, but the same effect can be achieved with a location constraint using a
- :ref:`rule ` with a node attribute expression for the resource's fail
- count.
-
- For example, to immediately ban the promoted role from a node with any
- failed promote or promoted instance monitor:
-
- .. code-block:: xml
-
-
-
-
-
-
-
-
- This example assumes that there is a promotable clone of the ``my_primitive``
- resource (note that the primitive name, not the clone name, is used in the
- rule), and that there is a recurring 10-second-interval monitor configured for
- the promoted role (fail count attributes specify the interval in
- milliseconds).
-
-.. _s-resource-monitoring:
-
-Monitoring Resources for Failure
-________________________________
-
-When Pacemaker first starts a resource, it runs one-time ``monitor`` operations
-(referred to as *probes*) to ensure the resource is running where it's
-supposed to be, and not running where it's not supposed to be. (This behavior
-can be affected by the ``resource-discovery`` location constraint property.)
-
-Other than those initial probes, Pacemaker will *not* (by default) check that
-the resource continues to stay healthy [#]_. You must configure ``monitor``
-operations explicitly to perform these checks.
-
-.. topic:: An OCF resource with a recurring health check
-
- .. code-block:: xml
-
-
-
-
-
-
-
-
-
-
-
-By default, a ``monitor`` operation will ensure that the resource is running
-where it is supposed to. The ``target-role`` property can be used for further
-checking.
-
-For example, if a resource has one ``monitor`` operation with
-``interval=10 role=Started`` and a second ``monitor`` operation with
-``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes
-it thinks *should* be running the resource, and the second monitor on any nodes
-that it thinks *should not* be running the resource (for the truly paranoid,
-who want to know when an administrator manually starts a service by mistake).
-
-.. note::
-
- Currently, monitors with ``role=Stopped`` are not implemented for
- :ref:`clone ` resources.
-
-.. _s-monitoring-unmanaged:
-
-Monitoring Resources When Administration is Disabled
-____________________________________________________
-
-Recurring ``monitor`` operations behave differently under various administrative
-settings:
-
-* When a resource is unmanaged (by setting ``is-managed=false``): No monitors
- will be stopped.
-
- If the unmanaged resource is stopped on a node where the cluster thinks it
- should be running, the cluster will detect and report that it is not, but it
- will not consider the monitor failed, and will not try to start the resource
- until it is managed again.
-
- Starting the unmanaged resource on a different node is strongly discouraged
- and will at least cause the cluster to consider the resource failed, and
- may require the resource's ``target-role`` to be set to ``Stopped`` then
- ``Started`` to be recovered.
-
-* When a resource is put into maintenance mode (by setting
- ``maintenance=true``): The resource will be marked as unmanaged. (This
- overrides ``is-managed=true``.)
-
- Additionally, all monitor operations will be stopped, except those specifying
- ``role`` as ``Stopped`` (which will be newly initiated if appropriate). As
- with unmanaged resources in general, starting a resource on a node other than
- where the cluster expects it to be will cause problems.
-
-* When a node is put into standby: All resources will be moved away from the
- node, and all ``monitor`` operations will be stopped on the node, except those
- specifying ``role`` as ``Stopped`` (which will be newly initiated if
- appropriate).
-
-* When a node is put into maintenance mode: All resources that are active on the
- node will be marked as in maintenance mode. See above for more details.
-
-* When the cluster is put into maintenance mode: All resources in the cluster
- will be marked as in maintenance mode. See above for more details.
-
-A resource is in maintenance mode if the cluster, the node where the resource
-is active, or the resource itself is configured to be in maintenance mode. If a
-resource is in maintenance mode, then it is also unmanaged. However, if a
-resource is unmanaged, it is not necessarily in maintenance mode.
-
-.. _s-operation-defaults:
-
-Setting Global Defaults for Operations
-______________________________________
-
-You can change the global default values for operation properties
-in a given cluster. These are defined in an ``op_defaults`` section
-of the CIB's ``configuration`` section, and can be set with
-``crm_attribute``. For example,
-
-.. code-block:: none
-
- # crm_attribute --type op_defaults --name timeout --update 20s
-
-would default each operation's ``timeout`` to 20 seconds. If an
-operation's definition also includes a value for ``timeout``, then that
-value would be used for that operation instead.
-
-When Implicit Operations Take a Long Time
-_________________________________________
-
-The cluster will always perform a number of implicit operations: ``start``,
-``stop`` and a non-recurring ``monitor`` operation used at startup to check
-whether the resource is already active. If one of these is taking too long,
-then you can create an entry for them and specify a longer timeout.
-
-.. topic:: An OCF resource with custom timeouts for its implicit actions
-
- .. code-block:: xml
-
-
-
-
-
-
-
-
-
-
-
-
-Multiple Monitor Operations
-___________________________
-
-Provided no two operations (for a single resource) have the same name
-and interval, you can have as many ``monitor`` operations as you like.
-In this way, you can do a superficial health check every minute and
-progressively more intense ones at higher intervals.
-
-To tell the resource agent what kind of check to perform, you need to
-provide each monitor with a different value for a common parameter.
-The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL``
-for this purpose and dictates that it is "made available to the
-resource agent without the normal ``OCF_RESKEY`` prefix".
-
-Whatever name you choose, you can specify it by adding an
-``instance_attributes`` block to the ``op`` tag. It is up to each
-resource agent to look for the parameter and decide how to use it.
-
-.. topic:: An OCF resource with two recurring health checks, performing
- different levels of checks specified via ``OCF_CHECK_LEVEL``.
-
- .. code-block:: xml
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Disabling a Monitor Operation
-_____________________________
-
-The easiest way to stop a recurring monitor is to just delete it.
-However, there can be times when you only want to disable it
-temporarily. In such cases, simply add ``enabled=false`` to the
-operation's definition.
-
-.. topic:: Example of an OCF resource with a disabled health check
-
- .. code-block:: xml
-
-
-
-
-
-
-
-
-
-
-This can be achieved from the command line by executing:
-
-.. code-block:: none
-
- # cibadmin --modify --xml-text ''
-
-Once you've done whatever you needed to do, you can then re-enable it with
-
-.. code-block:: none
-
- # cibadmin --modify --xml-text ''
-
-.. [#] Currently, anyway. Automatic monitoring operations may be added in a future
- version of Pacemaker.
diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h
index a8ed601f07..1dd2fc7913 100644
--- a/include/crm/msg_xml.h
+++ b/include/crm/msg_xml.h
@@ -1,485 +1,485 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_MSG_XML__H
# define PCMK__CRM_MSG_XML__H
# include <crm/common/xml.h>
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include <crm/msg_xml_compat.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* This file defines constants for various XML syntax (mainly element and
* attribute names).
*
* For consistency, new constants should start with "PCMK_", followed by "XE"
* for XML element names, "XA" for XML attribute names, and "META" for meta
* attribute names. Old names that don't follow this policy should eventually be
* deprecated and replaced with names that do.
*/
/*
* XML elements
*/
#define PCMK_XE_DATE_EXPRESSION "date_expression"
#define PCMK_XE_OP_EXPRESSION "op_expression"
/* This has been deprecated as a CIB element (an alias for <clone> with
* "promotable" set to "true") since 2.0.0.
*/
#define PCMK_XE_PROMOTABLE_LEGACY "master"
#define PCMK_XE_RSC_EXPRESSION "rsc_expression"
/*
* XML attributes
*/
/* These have been deprecated as CIB element attributes (aliases for
* "promoted-max" and "promoted-node-max") since 2.0.0.
*/
#define PCMK_XA_PROMOTED_MAX_LEGACY "master-max"
#define PCMK_XA_PROMOTED_NODE_MAX_LEGACY "master-node-max"
/*
* Meta attributes
*/
#define PCMK_META_ENABLED "enabled"
+#define PCMK_META_FAILURE_TIMEOUT "failure-timeout"
+#define PCMK_META_MIGRATION_THRESHOLD "migration-threshold"
/*
* Older constants that don't follow current naming
*/
# ifndef F_ORIG
# define F_ORIG "src"
# endif
# ifndef F_SEQ
# define F_SEQ "seq"
# endif
# ifndef F_SUBTYPE
# define F_SUBTYPE "subt"
# endif
# ifndef F_TYPE
# define F_TYPE "t"
# endif
# ifndef F_CLIENTNAME
# define F_CLIENTNAME "cn"
# endif
# ifndef F_XML_TAGNAME
# define F_XML_TAGNAME "__name__"
# endif
# ifndef T_CRM
# define T_CRM "crmd"
# endif
# ifndef T_ATTRD
# define T_ATTRD "attrd"
# endif
# define CIB_OPTIONS_FIRST "cib-bootstrap-options"
# define F_CRM_DATA "crm_xml"
# define F_CRM_TASK "crm_task"
# define F_CRM_HOST_TO "crm_host_to"
# define F_CRM_MSG_TYPE F_SUBTYPE
# define F_CRM_SYS_TO "crm_sys_to"
# define F_CRM_SYS_FROM "crm_sys_from"
# define F_CRM_HOST_FROM F_ORIG
# define F_CRM_REFERENCE XML_ATTR_REFERENCE
# define F_CRM_VERSION XML_ATTR_VERSION
# define F_CRM_ORIGIN "origin"
# define F_CRM_USER "crm_user"
# define F_CRM_JOIN_ID "join_id"
# define F_CRM_DC_LEAVING "dc-leaving"
# define F_CRM_ELECTION_ID "election-id"
# define F_CRM_ELECTION_AGE_S "election-age-sec"
# define F_CRM_ELECTION_AGE_US "election-age-nano-sec"
# define F_CRM_ELECTION_OWNER "election-owner"
# define F_CRM_TGRAPH "crm-tgraph-file"
# define F_CRM_TGRAPH_INPUT "crm-tgraph-in"
# define F_CRM_THROTTLE_MODE "crm-limit-mode"
# define F_CRM_THROTTLE_MAX "crm-limit-max"
/*---- Common tags/attrs */
# define XML_DIFF_MARKER "__crm_diff_marker__"
# define XML_TAG_CIB "cib"
# define XML_TAG_FAILED "failed"
# define XML_ATTR_CRM_VERSION "crm_feature_set"
# define XML_ATTR_DIGEST "digest"
# define XML_ATTR_VALIDATION "validate-with"
# define XML_ATTR_QUORUM_PANIC "no-quorum-panic"
# define XML_ATTR_HAVE_QUORUM "have-quorum"
# define XML_ATTR_HAVE_WATCHDOG "have-watchdog"
# define XML_ATTR_GENERATION "epoch"
# define XML_ATTR_GENERATION_ADMIN "admin_epoch"
# define XML_ATTR_NUMUPDATES "num_updates"
# define XML_ATTR_TIMEOUT "timeout"
# define XML_ATTR_ORIGIN "crm-debug-origin"
# define XML_ATTR_TSTAMP "crm-timestamp"
# define XML_CIB_ATTR_WRITTEN "cib-last-written"
# define XML_ATTR_VERSION "version"
# define XML_ATTR_DESC "description"
# define XML_ATTR_ID "id"
# define XML_ATTR_NAME "name"
# define XML_ATTR_IDREF "id-ref"
# define XML_ATTR_ID_LONG "long-id"
# define XML_ATTR_TYPE "type"
# define XML_ATTR_VERBOSE "verbose"
# define XML_ATTR_OP "op"
# define XML_ATTR_DC_UUID "dc-uuid"
# define XML_ATTR_UPDATE_ORIG "update-origin"
# define XML_ATTR_UPDATE_CLIENT "update-client"
# define XML_ATTR_UPDATE_USER "update-user"
# define XML_BOOLEAN_TRUE "true"
# define XML_BOOLEAN_FALSE "false"
# define XML_BOOLEAN_YES XML_BOOLEAN_TRUE
# define XML_BOOLEAN_NO XML_BOOLEAN_FALSE
# define XML_TAG_OPTIONS "options"
/*---- top level tags/attrs */
# define XML_ATTR_REQUEST "request"
# define XML_ATTR_RESPONSE "response"
# define XML_ATTR_UNAME "uname"
# define XML_ATTR_REFERENCE "reference"
# define XML_CRM_TAG_PING "ping_response"
# define XML_PING_ATTR_STATUS "result"
# define XML_PING_ATTR_SYSFROM "crm_subsystem"
# define XML_PING_ATTR_CRMDSTATE "crmd_state"
# define XML_PING_ATTR_PACEMAKERDSTATE "pacemakerd_state"
# define XML_PING_ATTR_PACEMAKERDSTATE_INIT "init"
# define XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS "starting_daemons"
# define XML_PING_ATTR_PACEMAKERDSTATE_WAITPING "wait_for_ping"
# define XML_PING_ATTR_PACEMAKERDSTATE_RUNNING "running"
# define XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN "shutting_down"
# define XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE "shutdown_complete"
# define XML_PING_ATTR_PACEMAKERDSTATE_REMOTE "remote"
# define XML_FAIL_TAG_CIB "failed_update"
# define XML_FAILCIB_ATTR_ID "id"
# define XML_FAILCIB_ATTR_OBJTYPE "object_type"
# define XML_FAILCIB_ATTR_OP "operation"
# define XML_FAILCIB_ATTR_REASON "reason"
/*---- CIB specific tags/attrs */
# define XML_CIB_TAG_SECTION_ALL "all"
# define XML_CIB_TAG_CONFIGURATION "configuration"
# define XML_CIB_TAG_STATUS "status"
# define XML_CIB_TAG_RESOURCES "resources"
# define XML_CIB_TAG_NODES "nodes"
# define XML_CIB_TAG_DOMAINS "domains"
# define XML_CIB_TAG_CONSTRAINTS "constraints"
# define XML_CIB_TAG_CRMCONFIG "crm_config"
# define XML_CIB_TAG_OPCONFIG "op_defaults"
# define XML_CIB_TAG_RSCCONFIG "rsc_defaults"
# define XML_CIB_TAG_ACLS "acls"
# define XML_CIB_TAG_ALERTS "alerts"
# define XML_CIB_TAG_ALERT "alert"
# define XML_CIB_TAG_ALERT_RECIPIENT "recipient"
# define XML_CIB_TAG_ALERT_SELECT "select"
# define XML_CIB_TAG_ALERT_ATTRIBUTES "select_attributes"
# define XML_CIB_TAG_ALERT_FENCING "select_fencing"
# define XML_CIB_TAG_ALERT_NODES "select_nodes"
# define XML_CIB_TAG_ALERT_RESOURCES "select_resources"
# define XML_CIB_TAG_ALERT_ATTR "attribute"
# define XML_CIB_TAG_STATE "node_state"
# define XML_CIB_TAG_NODE "node"
# define XML_CIB_TAG_NVPAIR "nvpair"
# define XML_CIB_TAG_PROPSET "cluster_property_set"
# define XML_TAG_ATTR_SETS "instance_attributes"
# define XML_TAG_META_SETS "meta_attributes"
# define XML_TAG_ATTRS "attributes"
# define XML_TAG_PARAMS "parameters"
# define XML_TAG_PARAM "param"
# define XML_TAG_UTILIZATION "utilization"
# define XML_TAG_RESOURCE_REF "resource_ref"
# define XML_CIB_TAG_RESOURCE "primitive"
# define XML_CIB_TAG_GROUP "group"
# define XML_CIB_TAG_INCARNATION "clone"
# define XML_CIB_TAG_CONTAINER "bundle"
# define XML_CIB_TAG_RSC_TEMPLATE "template"
# define XML_RSC_ATTR_TARGET "container-attribute-target"
# define XML_RSC_ATTR_RESTART "restart-type"
# define XML_RSC_ATTR_ORDERED "ordered"
# define XML_RSC_ATTR_INTERLEAVE "interleave"
# define XML_RSC_ATTR_INCARNATION "clone"
# define XML_RSC_ATTR_INCARNATION_MAX "clone-max"
# define XML_RSC_ATTR_INCARNATION_MIN "clone-min"
# define XML_RSC_ATTR_INCARNATION_NODEMAX "clone-node-max"
# define XML_RSC_ATTR_PROMOTABLE "promotable"
# define XML_RSC_ATTR_PROMOTED_MAX "promoted-max"
# define XML_RSC_ATTR_PROMOTED_NODEMAX "promoted-node-max"
# define XML_RSC_ATTR_MANAGED "is-managed"
# define XML_RSC_ATTR_TARGET_ROLE "target-role"
# define XML_RSC_ATTR_UNIQUE "globally-unique"
# define XML_RSC_ATTR_NOTIFY "notify"
# define XML_RSC_ATTR_STICKINESS "resource-stickiness"
-# define XML_RSC_ATTR_FAIL_STICKINESS "migration-threshold"
-# define XML_RSC_ATTR_FAIL_TIMEOUT "failure-timeout"
# define XML_RSC_ATTR_MULTIPLE "multiple-active"
# define XML_RSC_ATTR_REQUIRES "requires"
# define XML_RSC_ATTR_CONTAINER "container"
# define XML_RSC_ATTR_INTERNAL_RSC "internal_rsc"
# define XML_RSC_ATTR_MAINTENANCE "maintenance"
# define XML_RSC_ATTR_REMOTE_NODE "remote-node"
# define XML_RSC_ATTR_CLEAR_OP "clear_failure_op"
# define XML_RSC_ATTR_CLEAR_INTERVAL "clear_failure_interval"
# define XML_RSC_ATTR_REMOTE_RA_ADDR "addr"
# define XML_RSC_ATTR_REMOTE_RA_SERVER "server"
# define XML_RSC_ATTR_REMOTE_RA_PORT "port"
# define XML_RSC_ATTR_CRITICAL "critical"
# define XML_REMOTE_ATTR_RECONNECT_INTERVAL "reconnect_interval"
# define XML_OP_ATTR_ON_FAIL "on-fail"
# define XML_OP_ATTR_START_DELAY "start-delay"
# define XML_OP_ATTR_ALLOW_MIGRATE "allow-migrate"
# define XML_OP_ATTR_ORIGIN "interval-origin"
# define XML_OP_ATTR_PENDING "record-pending"
# define XML_OP_ATTR_DIGESTS_ALL "digests-all"
# define XML_OP_ATTR_DIGESTS_SECURE "digests-secure"
# define XML_CIB_TAG_LRM "lrm"
# define XML_LRM_TAG_RESOURCES "lrm_resources"
# define XML_LRM_TAG_RESOURCE "lrm_resource"
# define XML_LRM_TAG_RSC_OP "lrm_rsc_op"
# define XML_AGENT_ATTR_CLASS "class"
# define XML_AGENT_ATTR_PROVIDER "provider"
//! \deprecated Do not use (will be removed in a future release)
# define XML_CIB_ATTR_REPLACE "replace"
# define XML_CIB_ATTR_SOURCE "source"
# define XML_CIB_ATTR_PRIORITY "priority"
# define XML_CIB_ATTR_SOURCE "source"
# define XML_NODE_JOIN_STATE "join"
# define XML_NODE_EXPECTED "expected"
# define XML_NODE_IN_CLUSTER "in_ccm"
# define XML_NODE_IS_PEER "crmd"
# define XML_NODE_IS_REMOTE "remote_node"
# define XML_NODE_IS_FENCED "node_fenced"
# define XML_NODE_IS_MAINTENANCE "node_in_maintenance"
# define XML_CIB_ATTR_SHUTDOWN "shutdown"
/* Aside from being an old name for the executor, LRM is a misnomer here because
* the controller and scheduler use these to track actions, which are not always
* executor operations.
*/
// XML attribute that takes interval specification (user-facing configuration)
# define XML_LRM_ATTR_INTERVAL "interval"
// XML attribute that takes interval in milliseconds (daemon APIs)
// (identical value as above, but different constant allows clearer code intent)
# define XML_LRM_ATTR_INTERVAL_MS XML_LRM_ATTR_INTERVAL
# define XML_LRM_ATTR_TASK "operation"
# define XML_LRM_ATTR_TASK_KEY "operation_key"
# define XML_LRM_ATTR_TARGET "on_node"
# define XML_LRM_ATTR_TARGET_UUID "on_node_uuid"
/*! Actions to be executed on Pacemaker Remote nodes are routed through the
* controller on the cluster node hosting the remote connection. That cluster
* node is considered the router node for the action.
*/
# define XML_LRM_ATTR_ROUTER_NODE "router_node"
# define XML_LRM_ATTR_RSCID "rsc-id"
# define XML_LRM_ATTR_OPSTATUS "op-status"
# define XML_LRM_ATTR_RC "rc-code"
# define XML_LRM_ATTR_CALLID "call-id"
# define XML_LRM_ATTR_OP_DIGEST "op-digest"
# define XML_LRM_ATTR_OP_RESTART "op-force-restart"
# define XML_LRM_ATTR_OP_SECURE "op-secure-params"
# define XML_LRM_ATTR_RESTART_DIGEST "op-restart-digest"
# define XML_LRM_ATTR_SECURE_DIGEST "op-secure-digest"
# define XML_LRM_ATTR_EXIT_REASON "exit-reason"
# define XML_RSC_OP_LAST_CHANGE "last-rc-change"
# define XML_RSC_OP_LAST_RUN "last-run" // deprecated since 2.0.3
# define XML_RSC_OP_T_EXEC "exec-time"
# define XML_RSC_OP_T_QUEUE "queue-time"
# define XML_LRM_ATTR_MIGRATE_SOURCE "migrate_source"
# define XML_LRM_ATTR_MIGRATE_TARGET "migrate_target"
# define XML_TAG_GRAPH "transition_graph"
# define XML_GRAPH_TAG_RSC_OP "rsc_op"
# define XML_GRAPH_TAG_PSEUDO_EVENT "pseudo_event"
# define XML_GRAPH_TAG_CRM_EVENT "crm_event"
# define XML_GRAPH_TAG_DOWNED "downed"
# define XML_GRAPH_TAG_MAINTENANCE "maintenance"
# define XML_TAG_RULE "rule"
# define XML_RULE_ATTR_SCORE "score"
# define XML_RULE_ATTR_SCORE_ATTRIBUTE "score-attribute"
# define XML_RULE_ATTR_ROLE "role"
# define XML_RULE_ATTR_BOOLEAN_OP "boolean-op"
# define XML_TAG_EXPRESSION "expression"
# define XML_EXPR_ATTR_ATTRIBUTE "attribute"
# define XML_EXPR_ATTR_OPERATION "operation"
# define XML_EXPR_ATTR_VALUE "value"
# define XML_EXPR_ATTR_TYPE "type"
# define XML_EXPR_ATTR_VALUE_SOURCE "value-source"
# define XML_CONS_TAG_RSC_DEPEND "rsc_colocation"
# define XML_CONS_TAG_RSC_ORDER "rsc_order"
# define XML_CONS_TAG_RSC_LOCATION "rsc_location"
# define XML_CONS_TAG_RSC_TICKET "rsc_ticket"
# define XML_CONS_TAG_RSC_SET "resource_set"
# define XML_CONS_ATTR_SYMMETRICAL "symmetrical"
# define XML_LOCATION_ATTR_DISCOVERY "resource-discovery"
# define XML_COLOC_ATTR_SOURCE "rsc"
# define XML_COLOC_ATTR_SOURCE_ROLE "rsc-role"
# define XML_COLOC_ATTR_TARGET "with-rsc"
# define XML_COLOC_ATTR_TARGET_ROLE "with-rsc-role"
# define XML_COLOC_ATTR_NODE_ATTR "node-attribute"
# define XML_COLOC_ATTR_INFLUENCE "influence"
//! \deprecated Deprecated since 2.1.5
# define XML_COLOC_ATTR_SOURCE_INSTANCE "rsc-instance"
//! \deprecated Deprecated since 2.1.5
# define XML_COLOC_ATTR_TARGET_INSTANCE "with-rsc-instance"
# define XML_LOC_ATTR_SOURCE "rsc"
# define XML_LOC_ATTR_SOURCE_PATTERN "rsc-pattern"
# define XML_ORDER_ATTR_FIRST "first"
# define XML_ORDER_ATTR_THEN "then"
# define XML_ORDER_ATTR_FIRST_ACTION "first-action"
# define XML_ORDER_ATTR_THEN_ACTION "then-action"
# define XML_ORDER_ATTR_KIND "kind"
//! \deprecated Deprecated since 2.1.5
# define XML_ORDER_ATTR_FIRST_INSTANCE "first-instance"
//! \deprecated Deprecated since 2.1.5
# define XML_ORDER_ATTR_THEN_INSTANCE "then-instance"
# define XML_TICKET_ATTR_TICKET "ticket"
# define XML_TICKET_ATTR_LOSS_POLICY "loss-policy"
# define XML_NVPAIR_ATTR_NAME "name"
# define XML_NVPAIR_ATTR_VALUE "value"
# define XML_NODE_ATTR_RSC_DISCOVERY "resource-discovery-enabled"
# define XML_CONFIG_ATTR_DC_DEADTIME "dc-deadtime"
# define XML_CONFIG_ATTR_ELECTION_FAIL "election-timeout"
# define XML_CONFIG_ATTR_FORCE_QUIT "shutdown-escalation"
# define XML_CONFIG_ATTR_RECHECK "cluster-recheck-interval"
# define XML_CONFIG_ATTR_FENCE_REACTION "fence-reaction"
# define XML_CONFIG_ATTR_SHUTDOWN_LOCK "shutdown-lock"
# define XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT "shutdown-lock-limit"
# define XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY "priority-fencing-delay"
# define XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT "node-pending-timeout"
# define XML_ALERT_ATTR_PATH "path"
# define XML_ALERT_ATTR_TIMEOUT "timeout"
# define XML_ALERT_ATTR_TSTAMP_FORMAT "timestamp-format"
# define XML_ALERT_ATTR_REC_VALUE "value"
# define XML_CIB_TAG_GENERATION_TUPPLE "generation_tuple"
# define XML_ATTR_TRANSITION_MAGIC "transition-magic"
# define XML_ATTR_TRANSITION_KEY "transition-key"
# define XML_ATTR_TE_NOWAIT "op_no_wait"
# define XML_ATTR_TE_TARGET_RC "op_target_rc"
# define XML_TAG_TRANSIENT_NODEATTRS "transient_attributes"
# define XML_TAG_DIFF_ADDED "diff-added"
# define XML_TAG_DIFF_REMOVED "diff-removed"
# define XML_ACL_TAG_USER "acl_target"
# define XML_ACL_TAG_USERv1 "acl_user"
# define XML_ACL_TAG_GROUP "acl_group"
# define XML_ACL_TAG_ROLE "acl_role"
# define XML_ACL_TAG_PERMISSION "acl_permission"
# define XML_ACL_TAG_ROLE_REF "role"
# define XML_ACL_TAG_ROLE_REFv1 "role_ref"
# define XML_ACL_ATTR_KIND "kind"
# define XML_ACL_TAG_READ "read"
# define XML_ACL_TAG_WRITE "write"
# define XML_ACL_TAG_DENY "deny"
# define XML_ACL_ATTR_REF "reference"
# define XML_ACL_ATTR_REFv1 "ref"
# define XML_ACL_ATTR_TAG "object-type"
# define XML_ACL_ATTR_TAGv1 "tag"
# define XML_ACL_ATTR_XPATH "xpath"
# define XML_ACL_ATTR_ATTRIBUTE "attribute"
# define XML_CIB_TAG_TICKETS "tickets"
# define XML_CIB_TAG_TICKET_STATE "ticket_state"
# define XML_CIB_TAG_TAGS "tags"
# define XML_CIB_TAG_TAG "tag"
# define XML_CIB_TAG_OBJ_REF "obj_ref"
# define XML_TAG_FENCING_TOPOLOGY "fencing-topology"
# define XML_TAG_FENCING_LEVEL "fencing-level"
# define XML_ATTR_STONITH_INDEX "index"
# define XML_ATTR_STONITH_TARGET "target"
# define XML_ATTR_STONITH_TARGET_VALUE "target-value"
# define XML_ATTR_STONITH_TARGET_PATTERN "target-pattern"
# define XML_ATTR_STONITH_TARGET_ATTRIBUTE "target-attribute"
# define XML_ATTR_STONITH_DEVICES "devices"
# define XML_TAG_DIFF "diff"
# define XML_DIFF_VERSION "version"
# define XML_DIFF_VSOURCE "source"
# define XML_DIFF_VTARGET "target"
# define XML_DIFF_CHANGE "change"
# define XML_DIFF_LIST "change-list"
# define XML_DIFF_ATTR "change-attr"
# define XML_DIFF_RESULT "change-result"
# define XML_DIFF_OP "operation"
# define XML_DIFF_PATH "path"
# define XML_DIFF_POSITION "position"
# define ID(x) crm_element_value(x, XML_ATTR_ID)
#ifdef __cplusplus
}
#endif
#endif
diff --git a/include/crm/msg_xml_compat.h b/include/crm/msg_xml_compat.h
index 0ea36e941d..a795b86c39 100644
--- a/include/crm/msg_xml_compat.h
+++ b/include/crm/msg_xml_compat.h
@@ -1,71 +1,77 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_MSG_XML_COMPAT__H
# define PCMK__CRM_MSG_XML_COMPAT__H
#include <crm/common/agents.h>  // PCMK_STONITH_PROVIDES
#ifdef __cplusplus
extern "C" {
#endif
/**
* \file
* \brief Deprecated Pacemaker XML constants API
* \ingroup core
* \deprecated Do not include this header directly. The XML constants in this
* header, and the header itself, will be removed in a future
* release.
*/
//! \deprecated Use PCMK_STONITH_PROVIDES instead
#define XML_RSC_ATTR_PROVIDES PCMK_STONITH_PROVIDES
//! \deprecated Use PCMK_XE_PROMOTABLE_LEGACY instead
#define XML_CIB_TAG_MASTER PCMK_XE_PROMOTABLE_LEGACY
//! \deprecated Use PCMK_XA_PROMOTED_MAX_LEGACY instead
#define PCMK_XE_PROMOTED_MAX_LEGACY PCMK_XA_PROMOTED_MAX_LEGACY
//! \deprecated Use PCMK_XA_PROMOTED_MAX_LEGACY instead
#define XML_RSC_ATTR_MASTER_MAX PCMK_XA_PROMOTED_MAX_LEGACY
//! \deprecated Use PCMK_XA_PROMOTED_NODE_MAX_LEGACY instead
#define PCMK_XE_PROMOTED_NODE_MAX_LEGACY PCMK_XA_PROMOTED_NODE_MAX_LEGACY
+//! \deprecated Use PCMK_META_MIGRATION_THRESHOLD instead
+#define XML_RSC_ATTR_FAIL_STICKINESS PCMK_META_MIGRATION_THRESHOLD
+
+//! \deprecated Use PCMK_META_FAILURE_TIMEOUT instead
+#define XML_RSC_ATTR_FAIL_TIMEOUT PCMK_META_FAILURE_TIMEOUT
+
//! \deprecated Use PCMK_XA_PROMOTED_NODE_MAX_LEGACY instead
#define XML_RSC_ATTR_MASTER_NODEMAX PCMK_XA_PROMOTED_NODE_MAX_LEGACY
//! \deprecated Do not use (will be removed in a future release)
#define XML_ATTR_RA_VERSION "ra-version"
//! \deprecated Do not use (will be removed in a future release)
#define XML_TAG_FRAGMENT "cib_fragment"
//! \deprecated Do not use (will be removed in a future release)
#define XML_TAG_RSC_VER_ATTRS "rsc_versioned_attrs"
//! \deprecated Do not use (will be removed in a future release)
#define XML_TAG_OP_VER_ATTRS "op_versioned_attrs"
//! \deprecated Do not use (will be removed in a future release)
#define XML_TAG_OP_VER_META "op_versioned_meta"
//! \deprecated Use \p XML_ATTR_ID instead
#define XML_ATTR_UUID "id"
//! \deprecated Use name member directly
#define TYPE(x) (((x) == NULL)? NULL : (const char *) ((x)->name))
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_MSG_XML_COMPAT__H
diff --git a/include/crm/pengine/remote_internal.h b/include/crm/pengine/remote_internal.h
index 46d58fc5e1..6f84946059 100644
--- a/include/crm/pengine/remote_internal.h
+++ b/include/crm/pengine/remote_internal.h
@@ -1,41 +1,40 @@
/*
- * Copyright 2013-2019 the Pacemaker project contributors
+ * Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PE_REMOTE__H
# define PE_REMOTE__H
#ifdef __cplusplus
extern "C" {
#endif
#include <glib.h>                   // gboolean
#include <libxml/tree.h>            // xmlNode
#include <crm/pengine/status.h>
bool xml_contains_remote_node(xmlNode *xml);
bool pe__is_remote_node(const pe_node_t *node);
bool pe__is_guest_node(const pe_node_t *node);
bool pe__is_guest_or_remote_node(const pe_node_t *node);
bool pe__is_bundle_node(const pe_node_t *node);
-bool pe__resource_is_remote_conn(const pe_resource_t *rsc,
- const pe_working_set_t *data_set);
+bool pe__resource_is_remote_conn(const pe_resource_t *rsc);
pe_resource_t *pe__resource_contains_guest_node(const pe_working_set_t *data_set,
const pe_resource_t *rsc);
void pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
void (*helper)(const pe_node_t*, void*), void *user_data);
xmlNode *pe_create_remote_xml(xmlNode *parent, const char *uname,
const char *container_id, const char *migrateable,
const char *is_managed, const char *start_timeout,
const char *server, const char *port);
#ifdef __cplusplus
}
#endif
#endif
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index f3659437d0..2f7fd5c477 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -1,1647 +1,1647 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <stdint.h>                 // uint8_t, uint32_t
#include <crm/msg_xml.h>
#include <crm/pengine/internal.h>
#include "libpacemaker_private.h"
static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void promote_resource(pe_resource_t *rsc, pe_node_t *node,
bool optional);
static void assert_role_error(pe_resource_t *rsc, pe_node_t *node,
bool optional);
#define RSC_ROLE_MAX (pcmk_role_promoted + 1)
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the immediate next role when transitioning from one role
* to a target role. For example, when going from Stopped to Promoted, the
* next role is Unpromoted, because the resource must be started before it
* can be promoted. The current state then becomes Started, which is fed
* into this array again, giving a next role of Promoted.
*
* Current role Immediate next role Final target role
* ------------ ------------------- -----------------
*/
/* Unknown */ { pcmk_role_unknown, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_stopped, /* Started */
pcmk_role_stopped, /* Unpromoted */
pcmk_role_stopped, /* Promoted */
},
/* Stopped */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_started, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_unpromoted, /* Promoted */
},
/* Started */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_started, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
/* Unpromoted */ { pcmk_role_stopped, /* Unknown */
pcmk_role_stopped, /* Stopped */
pcmk_role_stopped, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
/* Promoted */ { pcmk_role_stopped, /* Unknown */
pcmk_role_unpromoted, /* Stopped */
pcmk_role_unpromoted, /* Started */
pcmk_role_unpromoted, /* Unpromoted */
pcmk_role_promoted, /* Promoted */
},
};
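/* Illustrative walk (editorial note, not part of the upstream source): for a
 * resource that is currently Stopped with a target role of Promoted,
 *   rsc_state_matrix[pcmk_role_stopped][pcmk_role_promoted]
 *       -> pcmk_role_unpromoted   (the resource must be started first)
 *   rsc_state_matrix[pcmk_role_unpromoted][pcmk_role_promoted]
 *       -> pcmk_role_promoted     (then it can be promoted)
 * schedule_role_transition_actions() below performs exactly this walk.
 */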
/*!
* \internal
* \brief Function to schedule actions needed for a role change
*
* \param[in,out] rsc Resource whose role is changing
* \param[in,out] node Node where resource will be in its next role
* \param[in] optional Whether scheduled actions should be optional
*/
typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node,
bool optional);
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the function needed to transition directly from one role
* to another. NULL indicates that nothing is needed.
*
* Current role Transition function Next role
* ------------ ------------------- ----------
*/
/* Unknown */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
assert_role_error, /* Started */
assert_role_error, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Stopped */ { assert_role_error, /* Unknown */
NULL, /* Stopped */
start_resource, /* Started */
start_resource, /* Unpromoted */
assert_role_error, /* Promoted */
},
/* Started */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
NULL, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Unpromoted */ { assert_role_error, /* Unknown */
stop_resource, /* Stopped */
stop_resource, /* Started */
NULL, /* Unpromoted */
promote_resource, /* Promoted */
},
/* Promoted */ { assert_role_error, /* Unknown */
demote_resource, /* Stopped */
demote_resource, /* Started */
demote_resource, /* Unpromoted */
NULL, /* Promoted */
},
};
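/* Continuing the illustrative Stopped -> Promoted walk (editorial note): the
 * transition functions looked up for each step are
 *   rsc_action_matrix[pcmk_role_stopped][pcmk_role_unpromoted]  -> start_resource
 *   rsc_action_matrix[pcmk_role_unpromoted][pcmk_role_promoted] -> promote_resource
 */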
/*!
* \internal
* \brief Get a list of a resource's allowed nodes sorted by node score
*
* \param[in] rsc Resource to check
*
* \return List of allowed nodes sorted by node score
*/
static GList *
sorted_allowed_nodes(const pe_resource_t *rsc)
{
if (rsc->allowed_nodes != NULL) {
GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
if (nodes != NULL) {
return pcmk__sort_nodes(nodes, pe__current_node(rsc));
}
}
return NULL;
}
/*!
* \internal
* \brief Assign a resource to its best allowed node, if possible
*
* \param[in,out] rsc Resource to choose a node for
* \param[in] prefer If not \c NULL, prefer this node when all else
* equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions
*
* \return true if \p rsc could be assigned to a node, otherwise false
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
static bool
assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer, bool stop_if_fail)
{
GList *nodes = NULL;
pe_node_t *chosen = NULL;
pe_node_t *best = NULL;
const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
if (prefer == NULL) {
prefer = most_free_node;
}
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
// We've already finished assignment of resources to nodes
return rsc->allocated_to != NULL;
}
// Sort allowed nodes by score
nodes = sorted_allowed_nodes(rsc);
if (nodes != NULL) {
best = (pe_node_t *) nodes->data; // First node has best score
}
if ((prefer != NULL) && (nodes != NULL)) {
// Get the allowed node version of prefer
chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
if (chosen == NULL) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
pe__node_name(prefer), rsc->id);
/* Favor the preferred node as long as its score is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
* node is better, when the best node's score is less than INFINITY.
*/
} else if (chosen->weight < best->weight) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
pe__node_name(chosen), rsc->id);
chosen = NULL;
} else if (!pcmk__node_available(chosen, true, false)) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
pe__node_name(chosen), rsc->id);
chosen = NULL;
} else {
pe_rsc_trace(rsc,
"Chose preferred node %s for %s "
"(ignoring %d candidates)",
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
}
if ((chosen == NULL) && (best != NULL)) {
/* Either there is no preferred node, or the preferred node is not
* suitable, but another node is allowed to run the resource.
*/
chosen = best;
if (!pe_rsc_is_unique_clone(rsc->parent)
&& (chosen->weight > 0) // Zero not acceptable
&& pcmk__node_available(chosen, false, false)) {
/* If the resource is already running on a node, prefer that node if
* it is just as good as the chosen node.
*
* We don't do this for unique clone instances, because
* pcmk__assign_instances() has already assigned instances to their
* running nodes when appropriate, and if we get here, we don't want
* remaining unassigned instances to prefer a node that's already
* running another instance.
*/
pe_node_t *running = pe__current_node(rsc);
if (running == NULL) {
// Nothing to do
} else if (!pcmk__node_available(running, true, false)) {
pe_rsc_trace(rsc,
"Current node for %s (%s) can't run resources",
rsc->id, pe__node_name(running));
} else {
int nodes_with_best_score = 1;
for (GList *iter = nodes->next; iter; iter = iter->next) {
pe_node_t *allowed = (pe_node_t *) iter->data;
if (allowed->weight != chosen->weight) {
// The nodes are sorted by score, so no more are equal
break;
}
if (pe__same_node(allowed, running)) {
// Scores are equal, so prefer the current node
chosen = allowed;
}
nodes_with_best_score++;
}
if (nodes_with_best_score > 1) {
uint8_t log_level = LOG_INFO;
if (chosen->weight >= INFINITY) {
log_level = LOG_WARNING;
}
do_crm_log(log_level,
"Chose %s for %s from %d nodes with score %s",
pe__node_name(chosen), rsc->id,
nodes_with_best_score,
pcmk_readable_score(chosen->weight));
}
}
}
pe_rsc_trace(rsc, "Chose %s for %s from %d candidates",
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
pcmk__assign_resource(rsc, chosen, false, stop_if_fail);
g_list_free(nodes);
return rsc->allocated_to != NULL;
}
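/* Worked example (editorial note, with made-up scores): if the sorted allowed
 * nodes are node1 (100), node2 (100), and node3 (50), and the resource is
 * already active on node2, the tie-breaking loop above keeps node2, because
 * its score equals the best node's and current placement wins ties.
 */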
/*!
* \internal
* \brief Apply a "this with" colocation to a node's allowed node scores
*
* \param[in,out] colocation Colocation to apply
* \param[in,out] rsc Resource being assigned
*/
static void
apply_this_with(pcmk__colocation_t *colocation, pe_resource_t *rsc)
{
GHashTable *archive = NULL;
pe_resource_t *other = colocation->primary;
// In certain cases, we will need to revert the node scores
if ((colocation->dependent_role >= pcmk_role_promoted)
|| ((colocation->score < 0) && (colocation->score > -INFINITY))) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
if (pcmk_is_set(other->flags, pe_rsc_provisional)) {
pe_rsc_trace(rsc,
"%s: Assigning colocation %s primary %s first"
"(score=%d role=%s)",
rsc->id, colocation->id, other->id,
colocation->score, role2text(colocation->dependent_role));
other->cmds->assign(other, NULL, true);
}
// Apply the colocation score to this resource's allowed node scores
rsc->cmds->apply_coloc_score(rsc, other, colocation, true);
if ((archive != NULL)
&& !pcmk__any_node_available(rsc->allowed_nodes)) {
pe_rsc_info(rsc,
"%s: Reverting scores from colocation with %s "
"because no nodes allowed",
rsc->id, other->id);
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = archive;
archive = NULL;
}
if (archive != NULL) {
g_hash_table_destroy(archive);
}
}
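/* Editorial note: the archive matters for soft scores. For example, applying
 * an anti-colocation of -5000 could leave the dependent with no available
 * node; in that case the pre-application copy of the node table is restored,
 * effectively ignoring the optional constraint rather than blocking the
 * resource.
 */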
/*!
* \internal
* \brief Update a Pacemaker Remote node once its connection has been assigned
*
* \param[in] connection Connection resource that has been assigned
*/
static void
remote_connection_assigned(const pe_resource_t *connection)
{
pe_node_t *remote_node = pe_find_node(connection->cluster->nodes,
connection->id);
CRM_CHECK(remote_node != NULL, return);
if ((connection->allocated_to != NULL)
&& (connection->next_role != pcmk_role_stopped)) {
crm_trace("Pacemaker Remote node %s will be online",
remote_node->details->id);
remote_node->details->online = TRUE;
if (remote_node->details->unseen) {
// Avoid unnecessary fence, since we will attempt connection
remote_node->details->unclean = FALSE;
}
} else {
crm_trace("Pacemaker Remote node %s will be shut down "
"(%sassigned connection's next role is %s)",
remote_node->details->id,
((connection->allocated_to == NULL)? "un" : ""),
role2text(connection->next_role));
remote_node->details->shutdown = TRUE;
}
}
/*!
* \internal
* \brief Assign a primitive resource to a node
*
* \param[in,out] rsc Resource to assign to a node
* \param[in] prefer Node to prefer, if all else is equal
* \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
* node, set next role to stopped and update
* existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
*
* \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
* completely undo the assignment. A successful assignment can be either
* undone or left alone as final. A failed assignment has the same effect
* as calling pcmk__unassign_resource(); there are no side effects on
* roles or actions.
*/
pe_node_t *
pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer,
bool stop_if_fail)
{
GList *this_with_colocations = NULL;
GList *with_this_colocations = NULL;
GList *iter = NULL;
pcmk__colocation_t *colocation = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native));
// Never assign a child without parent being assigned first
if ((rsc->parent != NULL)
&& !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "%s: Assigning parent %s first",
rsc->id, rsc->parent->id);
rsc->parent->cmds->assign(rsc->parent, prefer, stop_if_fail);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
// Assignment has already been done
const char *node_name = "no node";
if (rsc->allocated_to != NULL) {
node_name = pe__node_name(rsc->allocated_to);
}
pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name);
return rsc->allocated_to;
}
// Ensure we detect assignment loops
if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
pe__set_resource_flags(rsc, pe_rsc_allocating);
pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes,
rsc->cluster);
this_with_colocations = pcmk__this_with_colocations(rsc);
with_this_colocations = pcmk__with_this_colocations(rsc);
// Apply mandatory colocations first, to satisfy as many as possible
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score <= -CRM_SCORE_INFINITY)
|| (colocation->score >= CRM_SCORE_INFINITY)) {
apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score <= -CRM_SCORE_INFINITY)
|| (colocation->score >= CRM_SCORE_INFINITY)) {
pcmk__add_dependent_scores(colocation, rsc);
}
}
pe__show_node_scores(true, rsc, "Mandatory-colocations",
rsc->allowed_nodes, rsc->cluster);
// Then apply optional colocations
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score > -CRM_SCORE_INFINITY)
&& (colocation->score < CRM_SCORE_INFINITY)) {
apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
if ((colocation->score > -CRM_SCORE_INFINITY)
&& (colocation->score < CRM_SCORE_INFINITY)) {
pcmk__add_dependent_scores(colocation, rsc);
}
}
g_list_free(this_with_colocations);
g_list_free(with_this_colocations);
if (rsc->next_role == pcmk_role_stopped) {
pe_rsc_trace(rsc,
"Banning %s from all nodes because it will be stopped",
rsc->id);
resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE,
rsc->cluster);
} else if ((rsc->next_role > rsc->role)
&& !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum)
&& (rsc->cluster->no_quorum_policy == no_quorum_freeze)) {
crm_notice("Resource %s cannot be elevated from %s to %s due to "
"no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
}
pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
rsc, __func__, rsc->allowed_nodes, rsc->cluster);
// Unmanage resource if fencing is enabled but no device is configured
if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
&& !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) {
pe__clear_resource_flags(rsc, pe_rsc_managed);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
// Unmanaged resources stay on their current node
const char *reason = NULL;
pe_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
} else if (rsc->role == pcmk_role_promoted) {
reason = "promoted";
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
pcmk__assign_resource(rsc, assign_to, true, stop_if_fail);
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
// Must stop at some point, but be consistent with stop_if_fail
if (stop_if_fail) {
pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources",
rsc->id);
}
pcmk__assign_resource(rsc, NULL, true, stop_if_fail);
} else if (!assign_best_node(rsc, prefer, stop_if_fail)) {
// Assignment failed
if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
} else if ((rsc->running_on != NULL) && stop_if_fail) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
}
pe__clear_resource_flags(rsc, pe_rsc_allocating);
if (rsc->is_remote_node) {
remote_connection_assigned(rsc);
}
return rsc->allocated_to;
}
/*!
* \internal
* \brief Schedule actions to bring resource down and back to current role
*
* \param[in,out] rsc Resource to restart
* \param[in,out] current Node that resource should be brought down on
* \param[in] need_stop Whether the resource must be stopped
* \param[in] need_promote Whether the resource must be promoted
 */
static void
schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
bool need_stop, bool need_promote)
{
enum rsc_role_e role = rsc->role;
enum rsc_role_e next_role;
rsc_transition_fn fn = NULL;
pe__set_resource_flags(rsc, pe_rsc_restarting);
// Bring resource down to a stop on its current node
while (role != pcmk_role_stopped) {
next_role = rsc_state_matrix[role][pcmk_role_stopped];
pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, current, !need_stop);
role = next_role;
}
// Bring resource up to its next role on its next node
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
&& !pcmk_is_set(rsc->flags, pe_rsc_block)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
if ((next_role == pcmk_role_promoted) && need_promote) {
required = true;
}
pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
(required? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, rsc->allocated_to, !required);
role = next_role;
}
pe__clear_resource_flags(rsc, pe_rsc_restarting);
}
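/* Illustrative trace (editorial note): restarting a promoted resource in
 * place with need_stop == true schedules, in order,
 *   demote -> stop    (first loop, stepping Promoted -> Unpromoted -> Stopped)
 *   start -> promote  (second loop, stepping Stopped -> Unpromoted -> Promoted)
 * with each step taken one matrix row at a time.
 */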
/*!
* \internal
* \brief If a resource's next role is not explicitly specified, set a default
*
* \param[in,out] rsc Resource to set next role for
*
* \return "explicit" if next role was explicitly set, otherwise "implicit"
*/
static const char *
set_default_next_role(pe_resource_t *rsc)
{
if (rsc->next_role != pcmk_role_unknown) {
return "explicit";
}
if (rsc->allocated_to == NULL) {
pe__set_next_role(rsc, pcmk_role_stopped, "assignment");
} else {
pe__set_next_role(rsc, pcmk_role_started, "assignment");
}
return "implicit";
}
/*!
* \internal
* \brief Create an action to represent an already pending start
*
* \param[in,out] rsc Resource to create start action for
*/
static void
create_pending_start(pe_resource_t *rsc)
{
pe_action_t *start = NULL;
pe_rsc_trace(rsc,
"Creating action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
pe__set_action_flags(start, pe_action_print_always);
}
/*!
* \internal
* \brief Schedule actions needed to take a resource to its next role
*
* \param[in,out] rsc Resource to schedule actions for
*/
static void
schedule_role_transition_actions(pe_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
while (role != rsc->next_role) {
enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role];
rsc_transition_fn fn = NULL;
pe_rsc_trace(rsc,
"Creating action to take %s from %s to %s (ending at %s)",
rsc->id, role2text(role), role2text(next_role),
role2text(rsc->next_role));
fn = rsc_action_matrix[role][next_role];
if (fn == NULL) {
break;
}
fn(rsc, rsc->allocated_to, false);
role = next_role;
}
}
/*!
* \internal
* \brief Create all actions needed for a given primitive resource
*
* \param[in,out] rsc Primitive resource to create actions for
*/
void
pcmk__primitive_create_actions(pe_resource_t *rsc)
{
bool need_stop = false;
bool need_promote = false;
bool is_moving = false;
bool allow_migrate = false;
bool multiply_active = false;
pe_node_t *current = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
const char *next_role_source = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native));
next_role_source = set_default_next_role(rsc);
pe_rsc_trace(rsc,
"Creating all actions for %s transition from %s to %s "
"(%s) on %s",
rsc->id, role2text(rsc->role), role2text(rsc->next_role),
next_role_source, pe__node_name(rsc->allocated_to));
current = rsc->fns->active_node(rsc, &num_all_active, &num_clean_active);
g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration,
rsc);
if ((current != NULL) && (rsc->allocated_to != NULL)
&& !pe__same_node(current, rsc->allocated_to)
&& (rsc->next_role >= pcmk_role_started)) {
pe_rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, pe__node_name(current),
pe__node_name(rsc->allocated_to));
is_moving = true;
allow_migrate = pcmk__rsc_can_migrate(rsc, current);
// This is needed even if migrating (though I'm not sure why ...)
need_stop = true;
}
// Check whether resource is partially migrated and/or multiply active
if ((rsc->partial_migration_source != NULL)
&& (rsc->partial_migration_target != NULL)
&& allow_migrate && (num_all_active == 2)
&& pe__same_node(current, rsc->partial_migration_source)
&& pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) {
/* A partial migration is in progress, and the migration target remains
* the same as when the migration began.
*/
pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue",
rsc->id, pe__node_name(rsc->partial_migration_source),
pe__node_name(rsc->partial_migration_target));
} else if ((rsc->partial_migration_source != NULL)
|| (rsc->partial_migration_target != NULL)) {
// A partial migration is in progress but can't be continued
if (num_all_active > 2) {
// The resource is migrating *and* multiply active!
crm_notice("Forcing recovery of %s because it is migrating "
"from %s to %s and possibly active elsewhere",
rsc->id, pe__node_name(rsc->partial_migration_source),
pe__node_name(rsc->partial_migration_target));
} else {
// The migration source or target isn't available
crm_notice("Forcing recovery of %s because it can no longer "
"migrate from %s to %s",
rsc->id, pe__node_name(rsc->partial_migration_source),
pe__node_name(rsc->partial_migration_target));
}
need_stop = true;
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = false;
} else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
multiply_active = (num_all_active > 1);
} else {
/* If a resource has "requires" set to nothing or quorum, don't consider
* it active on unclean nodes (similar to how all resources behave when
* stonith-enabled is false). We can start such resources elsewhere
* before fencing completes, and if we considered the resource active on
* the failed node, we would attempt recovery for being active on
* multiple nodes.
*/
multiply_active = (num_clean_active > 1);
}
if (multiply_active) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Resource was (possibly) incorrectly multiply active
pe_proc_err("%s resource %s might be active on %u nodes (%s)",
pcmk__s(class, "Untyped"), rsc->id, num_all_active,
recovery2text(rsc->recovery_type));
crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ"
"#Resource_is_Too_Active for more information");
switch (rsc->recovery_type) {
case pcmk_multiply_active_restart:
need_stop = true;
break;
case pcmk_multiply_active_unexpected:
need_stop = true; // stop_resource() will skip expected node
pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
break;
default:
break;
}
} else {
pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
}
if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
create_pending_start(rsc);
}
if (is_moving) {
// Remaining tests are only for resources staying where they are
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
need_stop = true;
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
if (rsc->next_role == pcmk_role_promoted) {
need_promote = true;
}
}
} else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = true;
} else if ((rsc->role > pcmk_role_started) && (current != NULL)
&& (rsc->allocated_to != NULL)) {
pe_action_t *start = NULL;
pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
if (!pcmk_is_set(start->flags, pe_action_optional)) {
// Recovery of a promoted resource
pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = true;
}
}
// Create any actions needed to bring resource down and back up to same role
schedule_restart_actions(rsc, current, need_stop, need_promote);
// Create any actions needed to take resource from this role to the next
schedule_role_transition_actions(rsc);
pcmk__create_recurring_actions(rsc);
if (allow_migrate) {
pcmk__create_migration_actions(rsc, current);
}
}
/*!
* \internal
* \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes
*
* \param[in] rsc Resource to check
*/
static void
rsc_avoids_remote_nodes(const pe_resource_t *rsc)
{
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (node->details->remote_rsc != NULL) {
node->weight = -INFINITY;
}
}
}
/*!
* \internal
* \brief Return allowed nodes as (possibly sorted) list
*
* Convert a resource's hash table of allowed nodes to a list. If printing to
* stdout, sort the list, to keep action ID numbers consistent for regression
* test output (while avoiding the performance hit on a live cluster).
*
* \param[in] rsc Resource to check for allowed nodes
*
* \return List of resource's allowed nodes
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
allowed_nodes_as_list(const pe_resource_t *rsc)
{
GList *allowed_nodes = NULL;
if (rsc->allowed_nodes) {
allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
if (!pcmk__is_daemon) {
allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
}
return allowed_nodes;
}
/*!
* \internal
* \brief Create implicit constraints needed for a primitive resource
*
* \param[in,out] rsc Primitive resource to create implicit constraints for
*/
void
pcmk__primitive_internal_constraints(pe_resource_t *rsc)
{
GList *allowed_nodes = NULL;
bool check_unfencing = false;
bool check_utilization = false;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native));
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping implicit constraints for unmanaged resource %s",
rsc->id);
return;
}
// Whether resource requires unfencing
check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
&& pcmk_is_set(rsc->cluster->flags,
pe_flag_enable_unfencing)
&& pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
&& !pcmk__str_eq(rsc->cluster->placement_strategy,
"default", pcmk__str_casei);
// Order stops before starts (i.e. restart)
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL,
pe_order_optional|pe_order_implies_then|pe_order_restart,
rsc->cluster);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
pe_rsc_promotable)
|| (rsc->role > pcmk_role_unpromoted)) {
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0),
NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
NULL,
pe_order_promoted_implies_first, rsc->cluster);
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0),
NULL,
pe_order_runnable_left, rsc->cluster);
}
// Don't clear resource history if probing on same node
pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
NULL, rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
NULL, pe_order_same_node|pe_order_then_cancels_first,
rsc->cluster);
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || (rsc->container != NULL)) {
allowed_nodes = allowed_nodes_as_list(rsc);
}
if (check_unfencing) {
g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc);
}
if (check_utilization) {
pcmk__create_utilization_constraints(rsc, allowed_nodes);
}
if (rsc->container != NULL) {
pe_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
/* Guest resources are not allowed to run on Pacemaker Remote nodes,
* to avoid nesting remotes. However, bundles are allowed.
*/
if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
rsc_avoids_remote_nodes(rsc->container);
}
/* If someone cleans up a guest or bundle node's container, we will
* likely schedule a (re-)probe of the container and recovery of the
* connection. Order the connection stop after the container probe,
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
pcmk__order_resource_actions(rsc->container, PCMK_ACTION_MONITOR,
rsc, PCMK_ACTION_STOP,
pe_order_optional);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
* meta-attribute. This is of questionable merit, since location
* constraints can accomplish the same thing. But we support it, so here
* we check whether a resource (that is not itself a remote connection)
* has container set to a remote node or guest node resource.
*/
} else if (rsc->container->is_remote_node) {
remote_rsc = rsc->container;
} else {
remote_rsc = pe__resource_contains_guest_node(rsc->cluster,
rsc->container);
}
if (remote_rsc != NULL) {
/* Force the resource on the Pacemaker Remote node instead of
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
pe_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
}
}
} else {
/* This resource is either a filler for a container that does NOT
* represent a Pacemaker Remote node, or a Pacemaker Remote
* connection resource for a guest node or bundle.
*/
int score;
crm_trace("Order and colocate %s relative to its container %s",
rsc->id, rsc->container->id);
pcmk__new_ordering(rsc->container,
pcmk__op_key(rsc->container->id,
PCMK_ACTION_START, 0),
NULL, rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
pe_order_implies_then|pe_order_runnable_left,
rsc->cluster);
pcmk__new_ordering(rsc,
pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
NULL,
rsc->container,
pcmk__op_key(rsc->container->id,
PCMK_ACTION_STOP, 0),
NULL, pe_order_implies_first, rsc->cluster);
if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
pcmk__new_colocation("#resource-with-container", NULL, score, rsc,
rsc->container, NULL, NULL,
pcmk__coloc_influence);
}
}
if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
/* Remote connections and fencing devices are not allowed to run on
* Pacemaker Remote nodes
*/
rsc_avoids_remote_nodes(rsc);
}
g_list_free(allowed_nodes);
}
/*!
* \internal
* \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
* allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
void
pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
const pe_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
enum pcmk__coloc_affects filter_results;
CRM_ASSERT((dependent != NULL) && (primary != NULL)
&& (colocation != NULL));
if (for_dependent) {
// Always process on behalf of primary resource
primary->cmds->apply_coloc_score(dependent, primary, colocation, false);
return;
}
filter_results = pcmk__colocation_affects(dependent, primary, colocation,
false);
pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
((colocation->score > 0)? "Colocating" : "Anti-colocating"),
dependent->id, primary->id, colocation->id, colocation->score,
filter_results);
switch (filter_results) {
case pcmk__coloc_affects_role:
pcmk__apply_coloc_to_priority(dependent, primary, colocation);
break;
case pcmk__coloc_affects_location:
pcmk__apply_coloc_to_scores(dependent, primary, colocation);
break;
default: // pcmk__coloc_affects_nothing
return;
}
}
/* Primitive implementation of
* resource_alloc_functions_t:with_this_colocations()
*/
void
pcmk__with_primitive_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (list != NULL));
if (rsc == orig_rsc) {
/* For the resource itself, add all of its own colocations and relevant
* colocations from its parent (if any).
*/
pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
if (rsc->parent != NULL) {
rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list);
}
} else {
// For an ancestor, add only explicitly configured constraints
for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
pcmk__add_with_this(list, colocation, orig_rsc);
}
}
}
}
/* Primitive implementation of
* resource_alloc_functions_t:this_with_colocations()
*/
void
pcmk__primitive_with_colocations(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList **list)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (list != NULL));
if (rsc == orig_rsc) {
/* For the resource itself, add all of its own colocations and relevant
* colocations from its parent (if any).
*/
pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
if (rsc->parent != NULL) {
rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list);
}
} else {
// For an ancestor, add only explicitly configured constraints
for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
pcmk__colocation_t *colocation = iter->data;
if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
pcmk__add_this_with(list, colocation, orig_rsc);
}
}
}
}
/*!
* \internal
* \brief Return action flags for a given primitive resource action
*
* \param[in,out] action Action to get flags for
* \param[in] node If not NULL, limit effects to this node (ignored)
*
* \return Flags appropriate to \p action on \p node
*/
uint32_t
pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
{
CRM_ASSERT(action != NULL);
return (uint32_t) action->flags;
}
/*!
* \internal
* \brief Check whether a node is a multiply active resource's expected node
*
* \param[in] rsc Resource to check
* \param[in] node Node to check
*
* \return true if \p rsc is multiply active with multiple-active set to
* stop_unexpected, and \p node is the node where it will remain active
* \note This assumes that the resource's next role cannot be changed to stopped
* after this is called, which should be reasonable if status has already
* been unpacked and resources have been assigned to nodes.
*/
static bool
is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
{
return pcmk_all_flags_set(rsc->flags,
pe_rsc_stop_unexpected|pe_rsc_restarting)
&& (rsc->next_role > pcmk_role_stopped)
&& pe__same_node(rsc->allocated_to, node);
}
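/* Editorial note: with multiple-active=stop_unexpected, a resource active on
 * nodes A and B but assigned to A is "expected" only on A. The schedulers
 * below use this to skip the stop on A and to turn A's start and promote into
 * pseudo-actions, while B gets a real stop.
 */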
/*!
* \internal
* \brief Schedule actions needed to stop a resource wherever it is active
*
* \param[in,out] rsc Resource being stopped
* \param[in] node Node where resource is being stopped (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *current = (pe_node_t *) iter->data;
pe_action_t *stop = NULL;
if (is_expected_node(rsc, current)) {
/* We are scheduling restart actions for a multiply active resource
* with multiple-active=stop_unexpected, and this is where it should
* not be stopped.
*/
pe_rsc_trace(rsc,
"Skipping stop of multiply active resource %s "
"on expected node %s",
rsc->id, pe__node_name(current));
continue;
}
if (rsc->partial_migration_target != NULL) {
// Continue migration if node originally was and remains target
if (pe__same_node(current, rsc->partial_migration_target)
&& pe__same_node(current, rsc->allocated_to)) {
pe_rsc_trace(rsc,
"Skipping stop of %s on %s "
"because partial migration there will continue",
rsc->id, pe__node_name(current));
continue;
} else {
pe_rsc_trace(rsc,
"Forcing stop of %s on %s "
"because migration target changed",
rsc->id, pe__node_name(current));
optional = false;
}
}
pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
rsc->id, pe__node_name(current));
stop = stop_action(rsc, current, optional);
if (rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", true);
} else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
|pe_rsc_stop_unexpected)) {
/* We are stopping a multiply active resource on a node that is
* not its expected node, and we are still scheduling restart
* actions, so the stop is for being multiply active.
*/
pe_action_set_reason(stop, "being multiply active", true);
}
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pe__clear_action_flags(stop, pe_action_runnable);
}
if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
pcmk__schedule_cleanup(rsc, current, optional);
}
if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
pe_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true,
NULL, false, rsc->cluster);
order_actions(stop, unfence, pe_order_implies_first);
if (!pcmk__node_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced",
rsc->id, pe__node_name(current));
}
}
}
}
/*!
* \internal
* \brief Schedule actions needed to start a resource on a node
*
* \param[in,out] rsc Resource being started
* \param[in,out] node Node where resource should be started
* \param[in] optional Whether actions should be optional
*/
static void
start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
pe_action_t *start = NULL;
CRM_ASSERT(node != NULL);
pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)",
(optional? "optional" : "required"), rsc->id,
pe__node_name(node), node->weight);
start = start_action(rsc, node, TRUE);
pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then);
if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
pe__clear_action_flags(start, pe_action_optional);
}
if (is_expected_node(rsc, node)) {
/* This could be a problem if the start becomes necessary for other
* reasons later.
*/
pe_rsc_trace(rsc,
"Start of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
pe__set_action_flags(start, pe_action_pseudo);
}
}
/*!
* \internal
* \brief Schedule actions needed to promote a resource on a node
*
* \param[in,out] rsc Resource being promoted
* \param[in] node Node where resource should be promoted
* \param[in] optional Whether actions should be optional
*/
static void
promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
GList *iter = NULL;
GList *action_list = NULL;
bool runnable = true;
CRM_ASSERT(node != NULL);
// Any start must be runnable for promotion to be runnable
action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pe_action_t *start = (pe_action_t *) iter->data;
if (!pcmk_is_set(start->flags, pe_action_runnable)) {
runnable = false;
}
}
g_list_free(action_list);
if (runnable) {
pe_action_t *promote = promote_action(rsc, node, optional);
pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pe__node_name(node));
if (is_expected_node(rsc, node)) {
/* This could be a problem if the promote becomes necessary for
* other reasons later.
*/
pe_rsc_trace(rsc,
"Promotion of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
pe__set_action_flags(promote, pe_action_pseudo);
}
} else {
pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
rsc->id, pe__node_name(node));
action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE,
true);
for (iter = action_list; iter != NULL; iter = iter->next) {
pe_action_t *promote = (pe_action_t *) iter->data;
pe__clear_action_flags(promote, pe_action_runnable);
}
g_list_free(action_list);
}
}
/*!
* \internal
* \brief Schedule actions needed to demote a resource wherever it is active
*
* \param[in,out] rsc Resource being demoted
* \param[in] node Node where resource should be demoted (ignored)
* \param[in] optional Whether actions should be optional
*/
static void
demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
/* Since this will only be called for a primitive (possibly as an instance
* of a collective resource), the resource is multiply active if it is
* running on more than one node, so we want to demote on all of them as
* part of recovery, regardless of which one is the desired node.
*/
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *current = (pe_node_t *) iter->data;
if (is_expected_node(rsc, current)) {
pe_rsc_trace(rsc,
"Skipping demote of multiply active resource %s "
"on expected node %s",
rsc->id, pe__node_name(current));
} else {
pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
pe__node_name(current));
demote_action(rsc, current, optional);
}
}
}
static void
assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
CRM_ASSERT(false);
}
/*!
* \internal
* \brief Schedule cleanup of a resource
*
* \param[in,out] rsc Resource to clean up
* \param[in] node Node to clean up on
* \param[in] optional Whether clean-up should be optional
*/
void
pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
{
/* If the cleanup is required, its orderings are optional, because they're
* relevant only if both actions are required. Conversely, if the cleanup is
* optional, the orderings make the then action required if the first action
* becomes required.
*/
uint32_t flag = optional? pe_order_implies_then : pe_order_optional;
CRM_CHECK((rsc != NULL) && (node != NULL), return);
if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
rsc->id, pe__node_name(node));
return;
}
if (node->details->unclean || !node->details->online) {
pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable",
rsc->id, pe__node_name(node));
return;
}
crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node));
delete_action(rsc, node, optional);
// stop -> clean-up -> start
pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
rsc, PCMK_ACTION_DELETE, flag);
pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE,
rsc, PCMK_ACTION_START, flag);
}
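/* Editorial note: concretely, when the clean-up is optional, flag is
 * pe_order_implies_then, so a stop that later becomes required also makes the
 * delete (and then the start) required; when the clean-up is required, the
 * orderings stay pe_order_optional because they only matter if both actions
 * are scheduled anyway.
 */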
/*!
* \internal
* \brief Add primitive meta-attributes relevant to graph actions to XML
*
* \param[in] rsc Primitive resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
char *value = NULL;
const pe_resource_t *parent = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native) && (xml != NULL));
/* Clone instance numbers get set internally as meta-attributes, and are
* needed in the transition graph (for example, to tell unique clone
* instances apart).
*/
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
if (value != NULL) {
name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
crm_xml_add(xml, name, value);
free(name);
}
// Not sure if this one is really needed ...
value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
if (value != NULL) {
name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
crm_xml_add(xml, name, value);
free(name);
}
/* The container meta-attribute can be set on the primitive itself or one of
* its parents (for example, a group inside a container resource), so check
* them all, and keep the highest one found.
*/
for (parent = rsc; parent != NULL; parent = parent->parent) {
if (parent->container != NULL) {
crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER,
parent->container->id);
}
}
/* Bundle replica children will get their external-ip set internally as a
* meta-attribute. The graph action needs it, but under a different naming
* convention than other meta-attributes.
*/
value = g_hash_table_lookup(rsc->meta, "external-ip");
if (value != NULL) {
crm_xml_add(xml, "pcmk_external_ip", value);
}
}
// Primitive implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__primitive_add_utilization(const pe_resource_t *rsc,
const pe_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native)
&& (orig_rsc != NULL) && (utilization != NULL));
if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
return;
}
pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
orig_rsc->id, rsc->id);
pcmk__release_node_capacity(utilization, rsc);
}
/*!
* \internal
* \brief Get epoch time of node's shutdown attribute (or now if none)
*
* \param[in,out] node Node to check
*
* \return Epoch time corresponding to shutdown attribute if set or now if not
*/
static time_t
shutdown_time(pe_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
time_t result = 0;
if (shutdown != NULL) {
long long result_ll;
if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
result = (time_t) result_ll;
}
}
return (result == 0)? get_effective_time(node->details->data_set) : result;
}
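/* Editorial note: for example, a node attribute of shutdown="1700000000"
 * yields that epoch value, while an unset (or unparsable) attribute leaves
 * result at 0 and falls back to the working set's effective current time.
 */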
/*!
* \internal
* \brief Ban a resource from a node if it's not locked to the node
*
* \param[in] data Node to check
* \param[in,out] user_data Resource to check
*/
static void
ban_if_not_locked(gpointer data, gpointer user_data)
{
const pe_node_t *node = (const pe_node_t *) data;
pe_resource_t *rsc = (pe_resource_t *) user_data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) {
resource_location(rsc, node, -CRM_SCORE_INFINITY,
XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
}
}
// Primitive implementation of resource_alloc_functions_t:shutdown_lock()
void
pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
{
const char *class = NULL;
CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_native));
class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Fence devices and remote connections can't be locked
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
- || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
+ || pe__resource_is_remote_conn(rsc)) {
return;
}
if (rsc->lock_node != NULL) {
// The lock was obtained from resource history
if (rsc->running_on != NULL) {
/* The resource was started elsewhere even though it is now
* considered locked. This shouldn't be possible, but as a
* failsafe, we don't want to disturb the resource now.
*/
pe_rsc_info(rsc,
"Cancelling shutdown lock because %s is already active",
rsc->id);
pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
pe_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
rsc->id, pe__node_name(node));
} else {
rsc->lock_node = node;
rsc->lock_time = shutdown_time(node);
}
}
}
if (rsc->lock_node == NULL) {
// No lock needed
return;
}
if (rsc->cluster->shutdown_lock > 0) {
time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;
pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, pe__node_name(rsc->lock_node),
(long long) lock_expiration);
pe__update_recheck_time(++lock_expiration, rsc->cluster);
} else {
pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, pe__node_name(rsc->lock_node));
}
// If resource is locked to one node, ban it from all other nodes
g_list_foreach(rsc->cluster->nodes, ban_if_not_locked, rsc);
}
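/* Worked example (editorial note, with made-up values): with
 * shutdown-lock-limit=600s and a lock taken at epoch 1700000000, the lock
 * expires at 1700000600, and the cluster recheck time is nudged to one second
 * past expiration so the expired lock is noticed promptly.
 */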
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 233c617482..c3d11dc887 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1178 +1,1178 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/scheduler_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean);
resource_object_functions_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
native_parameter,
native_print,
native_active,
native_resource_state,
native_location,
native_free,
pe__count_common,
pe__native_is_filtered,
active_node,
pe__primitive_max_per_node,
},
{
group_unpack,
native_find_rsc,
native_parameter,
group_print,
group_active,
group_resource_state,
native_location,
group_free,
pe__count_common,
pe__group_is_filtered,
active_node,
pe__group_max_per_node,
},
{
clone_unpack,
native_find_rsc,
native_parameter,
clone_print,
clone_active,
clone_resource_state,
native_location,
clone_free,
pe__count_common,
pe__clone_is_filtered,
active_node,
pe__clone_max_per_node,
},
{
pe__unpack_bundle,
native_find_rsc,
native_parameter,
pe__print_bundle,
pe__bundle_active,
pe__bundle_resource_state,
native_location,
pe__free_bundle,
pe__count_bundle,
pe__bundle_is_filtered,
pe__bundle_active_node,
pe__bundle_max_per_node,
}
};
static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
return pe_native;
} else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
return pe_group;
} else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
return pe_clone;
} else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
return pe_clone;
} else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
return pe_container;
}
return pe_unknown;
}
static void
dup_attr(gpointer key, gpointer value, gpointer user_data)
{
add_hash_param(user_data, key, value);
}
static void
expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
pe_resource_t *p = rsc->parent;
if (p == NULL) {
        return;
}
    /* Search all ancestor resources and collect the "meta_attributes" values
     * fixed in their original XML into the hash table. Values fixed in closer
     * ancestors take precedence and are not overwritten.
     */
while(p != NULL) {
/* A hash table for comparison is generated, including the id-ref. */
pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
rule_data, parent_orig_meta, NULL, FALSE, data_set);
p = p->parent;
}
    /* Apply any "meta_attributes" values fixed in an ancestor's original XML */
if (parent_orig_meta != NULL) {
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, parent_orig_meta);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
            /* Values fixed in an ancestor's original XML propagate to the
             * child, but attributes that already exist on the child are not
             * updated.
             */
dup_attr(key, value, meta_hash);
}
}
if (parent_orig_meta != NULL) {
g_hash_table_destroy(parent_orig_meta);
}
    return;
}
void
get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_node_t * node, pe_working_set_t * data_set)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
const char *prop_name = (const char *) a->name;
const char *prop_value = pcmk__xml_attr_value(a);
add_hash_param(meta_hash, prop_name, prop_value);
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
    /* Add "meta_attributes" explicitly set in ancestor resources' XML to the
     * child's hash table; anything already set for the child is not
     * overwritten.
     */
if (rsc->parent != NULL) {
expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
}
/* check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
    /* Finally, for any "meta_attributes" not explicitly set here or by
     * rsc_defaults, fall back to the parent's unpacked meta-attributes;
     * values already set up to this point are not overwritten.
     */
if (rsc->parent) {
g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
}
}
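/* Editorial summary of the resulting precedence (highest first):
 *   1. attributes and meta_attributes in the resource's own XML
 *   2. meta_attributes fixed in ancestors' XML, nearest ancestor first
 *   3. rsc_defaults
 *   4. anything else already unpacked into the parent's meta table
 * A value set by an earlier step is never overwritten by a later one.
 */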
void
get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
const pe_node_t *node, pe_working_set_t *data_set)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
if (node) {
rule_data.node_hash = node->details->attrs;
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
meta_hash, NULL, FALSE, data_set);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
&rule_data, meta_hash, NULL, FALSE, data_set);
}
}
static char *
template_op_key(xmlNode * op)
{
const char *name = crm_element_value(op, "name");
const char *role = crm_element_value(op, "role");
char *key = NULL;
if ((role == NULL)
|| pcmk__strcase_any_of(role, PCMK__ROLE_STARTED, PCMK__ROLE_UNPROMOTED,
PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
role = PCMK__ROLE_UNKNOWN;
}
key = crm_strdup_printf("%s-%s", name, role);
return key;
}
static gboolean
unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
xmlNode *new_xml = NULL;
xmlNode *child_xml = NULL;
xmlNode *rsc_ops = NULL;
xmlNode *template_ops = NULL;
const char *template_ref = NULL;
const char *clone = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for template unpacking");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
}
template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
XML_ATTR_ID, template_ref);
if (template == NULL) {
pe_err("No template named '%s'", template_ref);
return FALSE;
}
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
crm_xml_replace(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
}
template_ops = find_xml_node(new_xml, "operations", FALSE);
for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
child_xml = pcmk__xe_next(child_xml)) {
xmlNode *new_child = NULL;
new_child = add_node_copy(new_xml, child_xml);
if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
rsc_ops = new_child;
}
}
if (template_ops && rsc_ops) {
xmlNode *op = NULL;
GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
g_hash_table_insert(rsc_ops_hash, key, op);
}
for (op = pcmk__xe_first_child(template_ops); op != NULL;
op = pcmk__xe_next(op)) {
char *key = template_op_key(op);
if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
add_node_copy(rsc_ops, op);
}
free(key);
}
if (rsc_ops_hash) {
g_hash_table_destroy(rsc_ops_hash);
}
free_xml(template_ops);
}
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
/* Disable multi-level templates for now */
/*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return FALSE;
} */
return TRUE;
}
static gboolean
add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
{
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for processing resource list of template");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
return TRUE;
}
static bool
detect_promotable(pe_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
if (crm_is_true(promotable)) {
return TRUE;
}
// @COMPAT deprecated since 2.0.0
if (pcmk__xe_is(rsc->xml, PCMK_XE_PROMOTABLE_LEGACY)) {
/* @TODO in some future version, pe_warn_once() here,
* then drop support in even later version
*/
g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
strdup(XML_BOOLEAN_TRUE));
return TRUE;
}
return FALSE;
}
static void
free_params_table(gpointer data)
{
g_hash_table_destroy((GHashTable *) data);
}
/*!
* \brief Get a table of resource parameters
*
* \param[in,out] rsc Resource to query
* \param[in] node Node for evaluating rules (NULL for defaults)
* \param[in,out] data_set Cluster working set
*
* \return Hash table containing resource parameter names and values
* (or NULL if \p rsc or \p data_set is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
pe_working_set_t *data_set)
{
GHashTable *params_on_node = NULL;
/* A NULL node is used to request the resource's default parameters
* (not evaluated for node), but we always want something non-NULL
* as a hash table key.
*/
const char *node_name = "";
// Sanity check
if ((rsc == NULL) || (data_set == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
node_name = node->details->uname;
}
// Find the parameter table for given node
if (rsc->parameter_cache == NULL) {
rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
} else {
params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
}
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
get_rsc_attributes(params_on_node, rsc, node, data_set);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
return params_on_node;
}
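/* Illustrative sketch: look up a single parameter as evaluated for a given
 * node. The table returned by pe_rsc_params() is cached on the resource and
 * destroyed when the resource is freed, so the caller must not destroy it.
 * "param" is a hypothetical parameter name supplied by the caller.
 */
static const char *
example_param_on_node(pe_resource_t *rsc, const pe_node_t *node,
                      pe_working_set_t *data_set, const char *param)
{
    GHashTable *params = pe_rsc_params(rsc, node, data_set);
    return (params == NULL)? NULL : g_hash_table_lookup(params, param);
}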
/*!
* \internal
* \brief Unpack a resource's "requires" meta-attribute
*
* \param[in,out] rsc Resource being unpacked
* \param[in] value Value of "requires" meta-attribute
* \param[in] is_default Whether \p value was selected by default
*/
static void
unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
{
if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"is disabled", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
} else {
pe__set_resource_flags(rsc,
pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
value = PCMK__VALUE_QUORUM;
} else if ((rsc->variant == pe_native)
&& xml_contains_remote_node(rsc->xml)) {
value = PCMK__VALUE_QUORUM;
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
value = PCMK__VALUE_UNFENCING;
} else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
value = PCMK__VALUE_FENCING;
} else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
value = PCMK__VALUE_NOTHING;
} else {
value = PCMK__VALUE_QUORUM;
}
if (orig_value != NULL) {
pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
"to '%s' because '%s' is not valid",
rsc->id, value, orig_value);
}
unpack_requires(rsc, value, true);
return;
}
pe_rsc_trace(rsc, "\tRequired to start: %s%s", value,
(is_default? " (default)" : ""));
}
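/* Illustrative examples of the defaulting above, for a plain primitive that
 * is neither a fence device nor a remote connection: with fencing enabled
 * and unfencing disabled, an unset or invalid value falls back to "fencing";
 * with fencing disabled and no-quorum-policy=ignore, it falls back to
 * "nothing"; otherwise it falls back to "quorum".
 */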
#ifndef PCMK__COMPAT_2_0
static void
warn_about_deprecated_classes(pe_resource_t *rsc)
{
const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
pe_warn_once(pe_wo_upstart,
"Support for Upstart resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
} else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
pe_warn_once(pe_wo_nagios,
"Support for Nagios resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
}
}
#endif
/*!
* \internal
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
* \c pe_resource_t object.
*
* \param[in] xml_obj XML node containing the resource's configuration
* \param[out] rsc Where to store the unpacked resource information
* \param[in] parent Resource's parent, if any
* \param[in,out] data_set Cluster working set
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
* the caller is responsible for freeing it using its variant-specific
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_resource_t *parent, pe_working_set_t *data_set)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
const char *value = NULL;
const char *id = NULL;
bool guest_node = false;
bool remote_node = false;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
CRM_CHECK(rsc != NULL, return EINVAL);
CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
*rsc = NULL;
return EINVAL);
rule_data.now = data_set->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
xml_obj->name);
return pcmk_rc_unpack_error;
}
if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
return pcmk_rc_unpack_error;
}
*rsc = calloc(1, sizeof(pe_resource_t));
if (*rsc == NULL) {
crm_crit("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
(*rsc)->cluster = data_set;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
(*rsc)->xml = expanded_xml;
(*rsc)->orig_xml = xml_obj;
} else {
(*rsc)->xml = xml_obj;
(*rsc)->orig_xml = NULL;
}
/* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
(*rsc)->ops_xml = expand_idref(ops, data_set->input);
(*rsc)->variant = get_resource_type((const char *) (*rsc)->xml->name);
if ((*rsc)->variant == pe_unknown) {
pe_err("Ignoring resource '%s' of unknown type '%s'",
id, (*rsc)->xml->name);
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
#ifndef PCMK__COMPAT_2_0
warn_about_deprecated_classes(*rsc);
#endif
(*rsc)->meta = pcmk__strkey_table(free, free);
(*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
(*rsc)->known_on = pcmk__strkey_table(NULL, free);
value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
if (value) {
(*rsc)->id = crm_strdup_printf("%s:%s", id, value);
add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
} else {
(*rsc)->id = strdup(id);
}
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
(*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
(*rsc)->flags = 0;
pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe__set_resource_flags(*rsc, pe_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
(*rsc)->role = pcmk_role_stopped;
(*rsc)->next_role = pcmk_role_unknown;
(*rsc)->recovery_type = pcmk_multiply_active_restart;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
(*rsc)->priority = char2score(value);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
(*rsc)->is_remote_node = TRUE;
if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
guest_node = true;
} else {
remote_node = true;
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
} else if ((value == NULL) && remote_node) {
        /* By default, we want remote nodes to be able to float around the
         * cluster without having to stop all the resources on the remote
         * node before moving. Enabling migration support makes this possible.
         * If this ever causes problems, migration support can be explicitly
         * turned off with allow-migrate=false.
         */
pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_managed);
} else {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
pe__clear_resource_flags(*rsc, pe_rsc_managed);
pe__set_resource_flags(*rsc, pe_rsc_maintenance);
}
if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
pe__set_resource_flags(*rsc, pe_rsc_unique);
}
if (detect_promotable(*rsc)) {
pe__set_resource_flags(*rsc, pe_rsc_promotable);
}
} else {
pe__set_resource_flags(*rsc, pe_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
(*rsc)->id);
pe_warn_once(pe_wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
(*rsc)->restart_type = pe_restart_ignore;
pe_rsc_trace((*rsc), "%s dependency restart handling: ignore",
(*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_stop;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_block;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
(*rsc)->recovery_type = pcmk_multiply_active_unexpected;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
} else { // "stop_start"
if (!pcmk__str_eq(value, "stop_start",
pcmk__str_casei|pcmk__str_null_matches)) {
pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
", using default of \"stop_start\"", value);
}
(*rsc)->recovery_type = pcmk_multiply_active_restart;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop/start", (*rsc)->id);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->stickiness = char2score(value);
}
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MIGRATION_THRESHOLD);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
/* @TODO We use 1 here to preserve previous behavior, but this
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
pe_warn_once(pe_wo_neg_threshold,
- XML_RSC_ATTR_FAIL_STICKINESS
+ PCMK_META_MIGRATION_THRESHOLD
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
}
if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
pe__set_resource_flags(*rsc, pe_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
unpack_requires(*rsc, value, false);
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_FAILURE_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
* reconnect_interval.
*
* @TODO Evaluate per node before using
*/
value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
if (value) {
/* reconnect delay works by setting failure_timeout and preventing the
* connection from starting until the failure is cleared. */
(*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
/* we want to override any default failure_timeout in use when remote
* reconnect_interval is in use. */
(*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
}
}
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
(*rsc)->next_role != pcmk_role_unknown? role2text((*rsc)->next_role) : "default");
if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
} else if (guest_node) {
        /* Remote resources tied to a container resource must always be
         * allowed to opt in to the cluster. Whether the connection resource
         * is actually allowed to be placed on a node depends on the
         * container resource.
         */
resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
}
pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
(*rsc)->utilization, NULL, FALSE, data_set);
if (expanded_xml) {
if (add_template_rsc(xml_obj, data_set) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
}
return pcmk_rc_ok;
}
gboolean
is_parent(pe_resource_t *child, pe_resource_t *rsc)
{
pe_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
}
while (parent->parent != NULL) {
if (parent->parent == rsc) {
return TRUE;
}
parent = parent->parent;
}
return FALSE;
}
pe_resource_t *
uber_parent(pe_resource_t * rsc)
{
pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL && parent->parent->variant != pe_container) {
parent = parent->parent;
}
return parent;
}
/*!
* \internal
* \brief Get the topmost parent of a resource as a const pointer
*
* \param[in] rsc Resource to check
* \param[in] include_bundle If true, go all the way to bundle
*
* \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
const pe_resource_t *
pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
{
const pe_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL) {
if (!include_bundle && (parent->parent->variant == pe_container)) {
break;
}
parent = parent->parent;
}
return parent;
}
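/* Illustrative sketch: for an instance of a bundled clone, uber_parent()
 * stops below the bundle, while passing include_bundle=true here returns the
 * bundle itself, so the bundle (if any) can be found like this.
 */
static const pe_resource_t *
example_bundle_of(const pe_resource_t *rsc)
{
    const pe_resource_t *top = pe__const_top_resource(rsc, true);
    return ((top != NULL) && (top->variant == pe_container))? top : NULL;
}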
void
common_free(pe_resource_t * rsc)
{
if (rsc == NULL) {
return;
}
pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
g_list_free(rsc->rsc_cons);
g_list_free(rsc->rsc_cons_lhs);
g_list_free(rsc->rsc_tickets);
g_list_free(rsc->dangling_migrations);
if (rsc->parameter_cache != NULL) {
g_hash_table_destroy(rsc->parameter_cache);
}
if (rsc->meta != NULL) {
g_hash_table_destroy(rsc->meta);
}
if (rsc->utilization != NULL) {
g_hash_table_destroy(rsc->utilization);
}
if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
rsc->orig_xml = NULL;
/* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
} else if (rsc->orig_xml) {
free_xml(rsc->xml);
rsc->xml = NULL;
}
if (rsc->running_on) {
g_list_free(rsc->running_on);
rsc->running_on = NULL;
}
if (rsc->known_on) {
g_hash_table_destroy(rsc->known_on);
rsc->known_on = NULL;
}
if (rsc->actions) {
g_list_free(rsc->actions);
rsc->actions = NULL;
}
if (rsc->allowed_nodes) {
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = NULL;
}
g_list_free(rsc->fillers);
g_list_free(rsc->rsc_location);
pe_rsc_trace(rsc, "Resource freed");
free(rsc->id);
free(rsc->clone_name);
free(rsc->allocated_to);
free(rsc->variant_opaque);
free(rsc->pending_task);
free(rsc);
}
/*!
* \internal
* \brief Count a node and update most preferred to it as appropriate
*
* \param[in] rsc An active resource
* \param[in] node A node that \p rsc is active on
* \param[in,out] active This will be set to \p node if \p node is more
* preferred than the current value
* \param[in,out] count_all If not NULL, this will be incremented
* \param[in,out] count_clean If not NULL, this will be incremented if \p node
* is online and clean
*
* \return true if the count should continue, or false if sufficiently known
*/
bool
pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
pe_node_t **active, unsigned int *count_all,
unsigned int *count_clean)
{
bool keep_looking = false;
bool is_happy = false;
CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
return false);
is_happy = node->details->online && !node->details->unclean;
if (count_all != NULL) {
++*count_all;
}
if ((count_clean != NULL) && is_happy) {
++*count_clean;
}
if ((count_all != NULL) || (count_clean != NULL)) {
keep_looking = true; // We're counting, so go through entire list
}
if (rsc->partial_migration_source != NULL) {
if (node->details == rsc->partial_migration_source->details) {
*active = node; // This is the migration source
} else {
keep_looking = true;
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
if (is_happy && ((*active == NULL) || !(*active)->details->online
|| (*active)->details->unclean)) {
*active = node; // This is the first clean node
} else {
keep_looking = true;
}
}
if (*active == NULL) {
*active = node; // This is the first node checked
}
return keep_looking;
}
// Shared implementation of resource_object_functions_t:active_node()
static pe_node_t *
active_node(const pe_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
pe_node_t *active = NULL;
if (count_all != NULL) {
*count_all = 0;
}
if (count_clean != NULL) {
*count_clean = 0;
}
if (rsc == NULL) {
return NULL;
}
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
count_all, count_clean)) {
break; // Don't waste time iterating if we don't have to
}
}
return active;
}
/*!
 * \internal
 * \brief Find and count active nodes according to "requires"
*
* \param[in] rsc Resource to check
* \param[out] count If not NULL, will be set to count of active nodes
*
* \return An active node (or NULL if resource is not active anywhere)
*
* \note This is a convenience wrapper for active_node() where the count of all
* active nodes or only clean active nodes is desired according to the
* "requires" meta-attribute.
*/
pe_node_t *
pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
{
if (rsc == NULL) {
if (count != NULL) {
*count = 0;
}
return NULL;
} else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
return rsc->fns->active_node(rsc, count, NULL);
} else {
return rsc->fns->active_node(rsc, NULL, count);
}
}
void
pe__count_common(pe_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
((pe_resource_t *) item->data)->fns->count(item->data);
}
} else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
|| (rsc->role > pcmk_role_stopped)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
rsc->cluster->blocked_resources++;
}
}
}
/*!
* \internal
* \brief Update a resource's next role
*
* \param[in,out] rsc Resource to be updated
* \param[in] role Resource's new next role
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
rsc->id, role2text(rsc->next_role), role2text(role), why);
rsc->next_role = role;
}
}
diff --git a/lib/pengine/failcounts.c b/lib/pengine/failcounts.c
index 9d5dfea962..6f8dab7ef1 100644
--- a/lib/pengine/failcounts.c
+++ b/lib/pengine/failcounts.c
@@ -1,403 +1,467 @@
/*
* Copyright 2008-2023 the Pacemaker project contributors
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/types.h>
#include <regex.h>
#include <glib.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <crm/pengine/internal.h>
static gboolean
is_matched_failure(const char *rsc_id, const xmlNode *conf_op_xml,
const xmlNode *lrm_op_xml)
{
gboolean matched = FALSE;
const char *conf_op_name = NULL;
const char *lrm_op_task = NULL;
const char *conf_op_interval_spec = NULL;
guint conf_op_interval_ms = 0;
guint lrm_op_interval_ms = 0;
const char *lrm_op_id = NULL;
char *last_failure_key = NULL;
if (rsc_id == NULL || conf_op_xml == NULL || lrm_op_xml == NULL) {
return FALSE;
}
// Get name and interval from configured op
conf_op_name = crm_element_value(conf_op_xml, "name");
conf_op_interval_spec = crm_element_value(conf_op_xml,
XML_LRM_ATTR_INTERVAL);
conf_op_interval_ms = crm_parse_interval_spec(conf_op_interval_spec);
// Get name and interval from op history entry
lrm_op_task = crm_element_value(lrm_op_xml, XML_LRM_ATTR_TASK);
crm_element_value_ms(lrm_op_xml, XML_LRM_ATTR_INTERVAL_MS,
&lrm_op_interval_ms);
if ((conf_op_interval_ms != lrm_op_interval_ms)
|| !pcmk__str_eq(conf_op_name, lrm_op_task, pcmk__str_casei)) {
return FALSE;
}
lrm_op_id = ID(lrm_op_xml);
last_failure_key = pcmk__op_key(rsc_id, "last_failure", 0);
if (pcmk__str_eq(last_failure_key, lrm_op_id, pcmk__str_casei)) {
matched = TRUE;
} else {
char *expected_op_key = pcmk__op_key(rsc_id, conf_op_name,
conf_op_interval_ms);
if (pcmk__str_eq(expected_op_key, lrm_op_id, pcmk__str_casei)) {
int rc = 0;
int target_rc = pe__target_rc_from_xml(lrm_op_xml);
crm_element_value_int(lrm_op_xml, XML_LRM_ATTR_RC, &rc);
if (rc != target_rc) {
matched = TRUE;
}
}
free(expected_op_key);
}
free(last_failure_key);
return matched;
}
static gboolean
block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
{
char *xml_name = clone_strip(rsc->id);
/* @TODO This xpath search occurs after template expansion, but it is unable
* to properly detect on-fail in id-ref, operation meta-attributes, or
* op_defaults, or evaluate rules.
*
* Also, on-fail defaults to block (in unpack_operation()) for stop actions
* when stonith is disabled.
*
* Ideally, we'd unpack the operation before this point, and pass in a
* meta-attributes table that takes all that into consideration.
*/
char *xpath = crm_strdup_printf("//" XML_CIB_TAG_RESOURCE
"[@" XML_ATTR_ID "='%s']"
"//" XML_ATTR_OP
"[@" XML_OP_ATTR_ON_FAIL "='block']",
xml_name);
xmlXPathObject *xpathObj = xpath_search(rsc->xml, xpath);
gboolean should_block = FALSE;
free(xpath);
if (xpathObj) {
int max = numXpathResults(xpathObj);
int lpc = 0;
for (lpc = 0; lpc < max; lpc++) {
xmlNode *pref = getXpathResult(xpathObj, lpc);
if (xml_op) {
should_block = is_matched_failure(xml_name, pref, xml_op);
if (should_block) {
break;
}
} else {
const char *conf_op_name = NULL;
const char *conf_op_interval_spec = NULL;
guint conf_op_interval_ms = 0;
char *lrm_op_xpath = NULL;
xmlXPathObject *lrm_op_xpathObj = NULL;
// Get name and interval from configured op
conf_op_name = crm_element_value(pref, "name");
conf_op_interval_spec = crm_element_value(pref, XML_LRM_ATTR_INTERVAL);
conf_op_interval_ms = crm_parse_interval_spec(conf_op_interval_spec);
#define XPATH_FMT "//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \
"//" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']" \
"/" XML_LRM_TAG_RSC_OP "[@" XML_LRM_ATTR_TASK "='%s']" \
"[@" XML_LRM_ATTR_INTERVAL "='%u']"
lrm_op_xpath = crm_strdup_printf(XPATH_FMT,
node->details->uname, xml_name,
conf_op_name,
conf_op_interval_ms);
lrm_op_xpathObj = xpath_search(rsc->cluster->input, lrm_op_xpath);
free(lrm_op_xpath);
if (lrm_op_xpathObj) {
int max2 = numXpathResults(lrm_op_xpathObj);
int lpc2 = 0;
for (lpc2 = 0; lpc2 < max2; lpc2++) {
xmlNode *lrm_op_xml = getXpathResult(lrm_op_xpathObj,
lpc2);
should_block = is_matched_failure(xml_name, pref,
lrm_op_xml);
if (should_block) {
break;
}
}
}
freeXpathObject(lrm_op_xpathObj);
if (should_block) {
break;
}
}
}
}
free(xml_name);
freeXpathObject(xpathObj);
return should_block;
}
/*!
* \internal
* \brief Get resource name as used in failure-related node attributes
*
* \param[in] rsc Resource to check
*
* \return Newly allocated string containing resource's fail name
* \note The caller is responsible for freeing the result.
*/
static inline char *
rsc_fail_name(const pe_resource_t *rsc)
{
const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
}
/*!
* \internal
* \brief Compile regular expression to match a failure-related node attribute
*
* \param[in] prefix Attribute prefix to match
* \param[in] rsc_name Resource name to match as used in failure attributes
* \param[in] is_legacy Whether DC uses per-resource fail counts
* \param[in] is_unique Whether the resource is a globally unique clone
* \param[out] re Where to store resulting regular expression
*
* \return Standard Pacemaker return code
* \note Fail attributes are named like PREFIX-RESOURCE#OP_INTERVAL.
* The caller is responsible for freeing re with regfree().
*/
static int
generate_fail_regex(const char *prefix, const char *rsc_name,
gboolean is_legacy, gboolean is_unique, regex_t *re)
{
char *pattern;
/* @COMPAT DC < 1.1.17: Fail counts used to be per-resource rather than
* per-operation.
*/
const char *op_pattern = (is_legacy? "" : "#.+_[0-9]+");
/* Ignore instance numbers for anything other than globally unique clones.
* Anonymous clone fail counts could contain an instance number if the
* clone was initially unique, failed, then was converted to anonymous.
* @COMPAT Also, before 1.1.8, anonymous clone fail counts always contained
* clone instance numbers.
*/
const char *instance_pattern = (is_unique? "" : "(:[0-9]+)?");
pattern = crm_strdup_printf("^%s-%s%s%s$", prefix, rsc_name,
instance_pattern, op_pattern);
if (regcomp(re, pattern, REG_EXTENDED|REG_NOSUB) != 0) {
free(pattern);
return EINVAL;
}
free(pattern);
return pcmk_rc_ok;
}
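/* Illustrative sketch (hypothetical resource name): per the naming scheme
 * above, the expression built for an anonymous clone "myclone" on a current
 * DC matches attribute names such as "fail-count-myclone#monitor_10000" and
 * "fail-count-myclone:0#monitor_10000", but not the legacy per-resource form
 * "fail-count-myclone".
 */
static bool
example_is_fail_count_attr(const char *attr_name)
{
    regex_t re;
    bool matched = false;
    if (generate_fail_regex(PCMK__FAIL_COUNT_PREFIX, "myclone",
                            FALSE /* is_legacy */, FALSE /* is_unique */,
                            &re) == pcmk_rc_ok) {
        matched = (regexec(&re, attr_name, 0, NULL, 0) == 0);
        regfree(&re);
    }
    return matched;
}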
/*!
* \internal
* \brief Compile regular expressions to match failure-related node attributes
*
* \param[in] rsc Resource being checked for failures
- * \param[in] data_set Data set (for CRM feature set version)
* \param[out] failcount_re Storage for regular expression for fail count
* \param[out] lastfailure_re Storage for regular expression for last failure
*
* \return Standard Pacemaker return code
* \note On success, the caller is responsible for freeing the expressions with
* regfree().
*/
static int
generate_fail_regexes(const pe_resource_t *rsc,
- const pe_working_set_t *data_set,
regex_t *failcount_re, regex_t *lastfailure_re)
{
+ int rc = pcmk_rc_ok;
char *rsc_name = rsc_fail_name(rsc);
- const char *version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ const char *version = crm_element_value(rsc->cluster->input,
+ XML_ATTR_CRM_VERSION);
+
+ // @COMPAT Pacemaker <= 1.1.16 used a single fail count per resource
gboolean is_legacy = (compare_version(version, "3.0.13") < 0);
- int rc = pcmk_rc_ok;
if (generate_fail_regex(PCMK__FAIL_COUNT_PREFIX, rsc_name, is_legacy,
pcmk_is_set(rsc->flags, pe_rsc_unique),
failcount_re) != pcmk_rc_ok) {
rc = EINVAL;
} else if (generate_fail_regex(PCMK__LAST_FAILURE_PREFIX, rsc_name,
is_legacy,
pcmk_is_set(rsc->flags, pe_rsc_unique),
lastfailure_re) != pcmk_rc_ok) {
rc = EINVAL;
regfree(failcount_re);
}
free(rsc_name);
return rc;
}
-int
-pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
- time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+// Data for fail-count-related iterators
+struct failcount_data {
+ const pe_node_t *node; // Node to check for fail count
+ pe_resource_t *rsc; // Resource to check for fail count
+ uint32_t flags; // Fail count flags
+ const xmlNode *xml_op; // History entry for expiration purposes (or NULL)
+ regex_t failcount_re; // Fail count regular expression to match
+ regex_t lastfailure_re; // Last failure regular expression to match
+ int failcount; // Fail count so far
+ time_t last_failure; // Time of most recent failure so far
+};
+
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a node attribute
+ *
+ * \param[in] key Node attribute name
+ * \param[in] value Node attribute value
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_attr(gpointer key, gpointer value, gpointer user_data)
{
- char *key = NULL;
- const char *value = NULL;
- regex_t failcount_re, lastfailure_re;
- int failcount = 0;
- time_t last = 0;
- GHashTableIter iter;
-
- CRM_CHECK(generate_fail_regexes(rsc, rsc->cluster, &failcount_re,
- &lastfailure_re) == pcmk_rc_ok,
- return 0);
+ struct failcount_data *fc_data = user_data;
+
+ // If this is a matching fail count attribute, update fail count
+ if (regexec(&(fc_data->failcount_re), (const char *) key, 0, NULL, 0) == 0) {
+ fc_data->failcount = pcmk__add_scores(fc_data->failcount,
+ char2score(value));
+ pe_rsc_trace(fc_data->rsc, "Added %s (%s) to %s fail count (now %s)",
+ (const char *) key, (const char *) value, fc_data->rsc->id,
+ pcmk_readable_score(fc_data->failcount));
+ return;
+ }
- /* Resource fail count is sum of all matching operation fail counts */
- g_hash_table_iter_init(&iter, node->details->attrs);
- while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
- if (regexec(&failcount_re, key, 0, NULL, 0) == 0) {
- failcount = pcmk__add_scores(failcount, char2score(value));
- crm_trace("Added %s (%s) to %s fail count (now %s)",
- key, value, rsc->id, pcmk_readable_score(failcount));
- } else if (regexec(&lastfailure_re, key, 0, NULL, 0) == 0) {
- long long last_ll;
-
- if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
- last = (time_t) QB_MAX(last, last_ll);
- }
+ // If this is a matching last failure attribute, update last failure
+ if (regexec(&(fc_data->lastfailure_re), (const char *) key, 0, NULL,
+ 0) == 0) {
+ long long last_ll;
+
+ if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
+ fc_data->last_failure = (time_t) QB_MAX(fc_data->last_failure,
+ last_ll);
}
}
+}
- regfree(&failcount_re);
- regfree(&lastfailure_re);
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a filler resource
+ *
+ * \param[in] data Filler resource
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_filler(gpointer data, gpointer user_data)
+{
+ pe_resource_t *filler = data;
+ struct failcount_data *fc_data = user_data;
+ time_t filler_last_failure = 0;
+
+ fc_data->failcount += pe_get_failcount(fc_data->node, filler,
+ &filler_last_failure, fc_data->flags,
+ fc_data->xml_op);
+ fc_data->last_failure = QB_MAX(fc_data->last_failure, filler_last_failure);
+}
- if ((failcount > 0) && (last > 0) && (last_failure != NULL)) {
- *last_failure = last;
- }
+/*!
+ * \internal
+ * \brief Get a resource's fail count on a node
+ *
+ * \param[in] node Node to check
+ * \param[in,out] rsc Resource to check
+ * \param[out] last_failure If not NULL, where to set time of most recent
+ * failure of \p rsc on \p node
+ * \param[in] flags Group of enum pe_fc_flags_e flags
+ * \param[in] xml_op If not NULL, consider only the action in this
+ * history entry when determining whether on-fail
+ * is configured as "blocked", otherwise consider
+ * all actions configured for \p rsc
+ *
+ * \return Fail count for \p rsc on \p node according to \p flags
+ */
+int
+pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
+ time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+{
+ struct failcount_data fc_data = {
+ .node = node,
+ .rsc = rsc,
+ .flags = flags,
+ .xml_op = xml_op,
+ .failcount = 0,
+ .last_failure = (time_t) 0,
+ };
+
+ // Calculate resource failcount as sum of all matching operation failcounts
+ CRM_CHECK(generate_fail_regexes(rsc, &fc_data.failcount_re,
+ &fc_data.lastfailure_re) == pcmk_rc_ok,
+ return 0);
+ g_hash_table_foreach(node->details->attrs, update_failcount_for_attr,
+ &fc_data);
+ regfree(&(fc_data.failcount_re));
+ regfree(&(fc_data.lastfailure_re));
- /* If failure blocks the resource, disregard any failure timeout */
- if ((failcount > 0) && rsc->failure_timeout
+ // If failure blocks the resource, disregard any failure timeout
+ if ((fc_data.failcount > 0) && (rsc->failure_timeout > 0)
&& block_failure(node, rsc, xml_op)) {
- pe_warn("Ignoring failure timeout %d for %s because it conflicts with on-fail=block",
+ pe_warn("Ignoring failure timeout %d for %s "
+ "because it conflicts with on-fail=block",
rsc->failure_timeout, rsc->id);
rsc->failure_timeout = 0;
}
- /* If all failures have expired, ignore fail count */
- if (pcmk_is_set(flags, pe_fc_effective) && (failcount > 0) && (last > 0)
- && rsc->failure_timeout) {
+ // If all failures have expired, ignore fail count
+ if (pcmk_is_set(flags, pe_fc_effective) && (fc_data.failcount > 0)
+ && (fc_data.last_failure > 0) && (rsc->failure_timeout != 0)) {
time_t now = get_effective_time(rsc->cluster);
- if (now > (last + rsc->failure_timeout)) {
- crm_debug("Failcount for %s on %s expired after %ds",
- rsc->id, pe__node_name(node), rsc->failure_timeout);
- failcount = 0;
+ if (now > (fc_data.last_failure + rsc->failure_timeout)) {
+ pe_rsc_debug(rsc, "Failcount for %s on %s expired after %ds",
+ rsc->id, pe__node_name(node), rsc->failure_timeout);
+ fc_data.failcount = 0;
}
}
- /* We never want the fail counts of a bundle container's fillers to
- * count towards the container's fail count.
+ /* Add the fail count of any filler resources, except that we never want the
+ * fail counts of a bundle container's fillers to count towards the
+ * container's fail count.
*
* Most importantly, a Pacemaker Remote connection to a bundle container
* is a filler of the container, but can reside on a different node than the
* container itself. Counting its fail count on its node towards the
* container's fail count on that node could lead to attempting to stop the
* container on the wrong node.
*/
-
- if (pcmk_is_set(flags, pe_fc_fillers) && rsc->fillers
+ if (pcmk_is_set(flags, pe_fc_fillers) && (rsc->fillers != NULL)
&& !pe_rsc_is_bundled(rsc)) {
- GList *gIter = NULL;
-
- for (gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *filler = (pe_resource_t *) gIter->data;
- time_t filler_last_failure = 0;
-
- failcount += pe_get_failcount(node, filler, &filler_last_failure,
- flags, xml_op);
-
- if (last_failure && filler_last_failure > *last_failure) {
- *last_failure = filler_last_failure;
- }
- }
-
- if (failcount > 0) {
- crm_info("Container %s and the resources within it "
- "have failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ g_list_foreach(rsc->fillers, update_failcount_for_filler, &fc_data);
+ if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc,
+ "Container %s and the resources within it "
+ "have failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- } else if (failcount > 0) {
- crm_info("%s has failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ } else if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc, "%s has failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- return failcount;
+ if (last_failure != NULL) {
+ if ((fc_data.failcount > 0) && (fc_data.last_failure > 0)) {
+ *last_failure = fc_data.last_failure;
+ } else {
+ *last_failure = 0;
+ }
+ }
+ return fc_data.failcount;
}
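/* Illustrative sketch: query the effective fail count (expired failures
 * ignored, filler resources included) along with the time of the most recent
 * failure.
 */
static void
example_log_failcount(const pe_node_t *node, pe_resource_t *rsc)
{
    time_t last_failure = 0;
    int failcount = pe_get_failcount(node, rsc, &last_failure,
                                     pe_fc_effective|pe_fc_fillers, NULL);
    if (failcount > 0) {
        crm_info("%s has fail count %d on %s (last failure at %lld)",
                 rsc->id, failcount, pe__node_name(node),
                 (long long) last_failure);
    }
}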
/*!
* \brief Schedule a controller operation to clear a fail count
*
* \param[in,out] rsc Resource with failure
* \param[in] node Node failure occurred on
* \param[in] reason Readable description why needed (for logging)
* \param[in,out] data_set Working set for cluster
*
* \return Scheduled action
*/
pe_action_t *
pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
const char *reason, pe_working_set_t *data_set)
{
char *key = NULL;
pe_action_t *clear = NULL;
CRM_CHECK(rsc && node && reason && data_set, return NULL);
key = pcmk__op_key(rsc->id, PCMK_ACTION_CLEAR_FAILCOUNT, 0);
clear = custom_action(rsc, key, PCMK_ACTION_CLEAR_FAILCOUNT, node, FALSE,
TRUE, data_set);
add_hash_param(clear->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s",
rsc->id, pe__node_name(node), reason, clear->uuid);
return clear;
}
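/* Illustrative sketch: schedule clearing of a resource's fail count on a
 * node. The reason string is arbitrary caller-supplied text, used only for
 * logging.
 */
static void
example_schedule_clear(pe_resource_t *rsc, const pe_node_t *node,
                       pe_working_set_t *data_set)
{
    pe_action_t *clear = pe__clear_failcount(rsc, node, "failure expired",
                                             data_set);
    CRM_CHECK(clear != NULL, return);
}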
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index 993c081c5d..af42a7c789 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,1699 +1,1743 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <crm/pengine/internal.h>
#include "pe_status_private.h"
static void unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms);
+ const pe_resource_t *container, guint interval_ms);
static void
add_singleton(pe_working_set_t *data_set, pe_action_t *action)
{
if (data_set->singletons == NULL) {
data_set->singletons = pcmk__strkey_table(NULL, NULL);
}
g_hash_table_insert(data_set->singletons, action->uuid, action);
}
static pe_action_t *
lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
{
if (data_set->singletons == NULL) {
return NULL;
}
return g_hash_table_lookup(data_set->singletons, action_uuid);
}
/*!
* \internal
* \brief Find an existing action that matches arguments
*
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
* \param[in] data_set Cluster working set
*
* \return Existing action that matches arguments (or NULL if none)
*/
static pe_action_t *
find_existing_action(const char *key, const pe_resource_t *rsc,
const pe_node_t *node, const pe_working_set_t *data_set)
{
GList *matches = NULL;
pe_action_t *action = NULL;
/* When rsc is NULL, it would be quicker to check data_set->singletons,
* but checking all data_set->actions takes the node into account.
*/
matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
}
CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches));
action = matches->data;
g_list_free(matches);
return action;
}
static xmlNode *
find_rsc_op_entry_helper(const pe_resource_t *rsc, const char *key,
gboolean include_disabled)
{
guint interval_ms = 0;
gboolean do_retry = TRUE;
char *local_key = NULL;
const char *name = NULL;
const char *interval_spec = NULL;
char *match_key = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
retry:
for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
bool enabled = false;
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
!enabled) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
match_key = pcmk__op_key(rsc->id, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
if (rsc->clone_name) {
match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
op = operation;
}
free(match_key);
}
if (op != NULL) {
free(local_key);
return op;
}
}
}
free(local_key);
if (do_retry == FALSE) {
return NULL;
}
do_retry = FALSE;
if ((strstr(key, PCMK_ACTION_MIGRATE_TO) != NULL)
|| (strstr(key, PCMK_ACTION_MIGRATE_FROM) != NULL)) {
local_key = pcmk__op_key(rsc->id, "migrate", 0);
key = local_key;
goto retry;
} else if (strstr(key, "_notify_")) {
local_key = pcmk__op_key(rsc->id, PCMK_ACTION_NOTIFY, 0);
key = local_key;
goto retry;
}
return NULL;
}
xmlNode *
find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
{
return find_rsc_op_entry_helper(rsc, key, FALSE);
}
/*!
* \internal
* \brief Create a new action object
*
* \param[in] key Action key
* \param[in] task Action name
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in,out] data_set Cluster working set
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
static pe_action_t *
new_action(char *key, const char *task, pe_resource_t *rsc,
const pe_node_t *node, bool optional, bool for_graph,
pe_working_set_t *data_set)
{
pe_action_t *action = calloc(1, sizeof(pe_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
    action->task = strdup(task);
    CRM_ASSERT(action->task != NULL);
action->uuid = key;
action->extra = pcmk__strkey_table(free, free);
action->meta = pcmk__strkey_table(free, free);
if (node) {
action->node = pe__copy_node(node);
}
if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
pe__set_action_flags(action, pe_action_dc);
}
pe__set_action_flags(action, pe_action_runnable);
if (optional) {
pe__set_action_flags(action, pe_action_optional);
} else {
pe__clear_action_flags(action, pe_action_optional);
}
if (rsc != NULL) {
guint interval_ms = 0;
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
- unpack_operation(action, action->op_entry, rsc->container, data_set,
- interval_ms);
+ unpack_operation(action, action->op_entry, rsc->container, interval_ms);
}
if (for_graph) {
pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
(optional? "optional" : "required"),
data_set->action_id, key, task,
((rsc == NULL)? "no resource" : rsc->id),
pe__node_name(node));
action->id = data_set->action_id++;
data_set->actions = g_list_prepend(data_set->actions, action);
if (rsc == NULL) {
add_singleton(data_set, action);
} else {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
return action;
}
/*!
* \internal
* \brief Evaluate node attribute values for an action
*
* \param[in,out] action Action to unpack attributes for
* \param[in,out] data_set Cluster working set
*/
static void
unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
{
if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
&& (action->op_entry != NULL)) {
pe_rule_eval_data_t rule_data = {
.node_hash = action->node->details->attrs,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
pe__set_action_flags(action, pe_action_have_node_attrs);
pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
&rule_data, action->extra, NULL,
FALSE, data_set);
}
}
/*!
* \internal
* \brief Update an action's optional flag
*
* \param[in,out] action Action to update
* \param[in] optional Requested optional status
*/
static void
update_action_optional(pe_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
&& !pcmk_is_set(action->flags, pe_action_pseudo)
&& !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, pe__node_name(action->node),
action->rsc->id);
pe__set_action_flags(action, pe_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
pe__clear_action_flags(action, pe_action_optional);
}
}
static enum pe_quorum_policy
effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
{
enum pe_quorum_policy policy = data_set->no_quorum_policy;
if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
policy = no_quorum_ignore;
} else if (data_set->no_quorum_policy == no_quorum_demote) {
switch (rsc->role) {
case pcmk_role_promoted:
case pcmk_role_unpromoted:
if (rsc->next_role > pcmk_role_unpromoted) {
pe__set_next_role(rsc, pcmk_role_unpromoted,
"no-quorum-policy=demote");
}
policy = no_quorum_ignore;
break;
default:
policy = no_quorum_stop;
break;
}
}
return policy;
}
/*!
* \internal
* \brief Update a resource action's runnable flag
*
* \param[in,out] action Action to update
* \param[in] for_graph Whether action should be recorded in transition graph
* \param[in,out] data_set Cluster working set
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
update_resource_action_runnable(pe_action_t *action, bool for_graph,
pe_working_set_t *data_set)
{
if (pcmk_is_set(action->flags, pe_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
pe__clear_action_flags(action, pe_action_runnable);
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
pe__clear_action_flags(action, pe_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"%s on %s is unrunnable (node is offline)",
action->uuid, pe__node_name(action->node));
if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
&& for_graph
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
pe_fence_node(data_set, action->node, "stop is unrunnable", false);
}
} else if (!pcmk_is_set(action->flags, pe_action_dc)
&& action->node->details->pending) {
pe__clear_action_flags(action, pe_action_runnable);
do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
"Action %s on %s is unrunnable (node is pending)",
action->uuid, pe__node_name(action->node));
} else if (action->needs == pcmk_requires_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
&& !pe_can_fence(data_set, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
* host is unclean and cannot be fenced.
*/
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pe_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, pe__node_name(action->node));
pe__set_action_flags(action, pe_action_runnable);
}
} else {
switch (effective_quorum_policy(action->rsc, data_set)) {
case no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
case no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
pe__clear_action_flags(action, pe_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
pe__set_action_flags(action, pe_action_runnable);
break;
}
}
}
/*!
* \internal
* \brief Update a resource object's flags for a new action on it
*
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] action New action
*/
static void
update_resource_flags_for_action(pe_resource_t *rsc, const pe_action_t *action)
{
/* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used
* within Pacemaker, and should be deprecated and eventually removed
*/
if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
pe__set_resource_flags(rsc, pe_rsc_stopping);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
if (pcmk_is_set(action->flags, pe_action_runnable)) {
pe__set_resource_flags(rsc, pe_rsc_starting);
} else {
pe__clear_resource_flags(rsc, pe_rsc_starting);
}
}
}
static bool
valid_stop_on_fail(const char *value)
{
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
+/*!
+ * \internal
+ * \brief Validate (and possibly reset) resource action's on-fail meta-attribute
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] action_config Action configuration XML from CIB (if any)
+ * \param[in,out] meta Table of action meta-attributes
+ *
+ * \return (Possibly new) value of on-fail meta-attribute
+ */
static const char *
-unpack_operation_on_fail(pe_action_t * action)
+validate_on_fail(const pe_resource_t *rsc, const char *action_name,
+ const xmlNode *action_config, GHashTable *meta)
{
const char *name = NULL;
const char *role = NULL;
- const char *on_fail = NULL;
const char *interval_spec = NULL;
- const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+ char *key = NULL;
+ char *new_value = NULL;
- if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
+ // Stop actions can only use certain on-fail values
+ if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
- "allowed for stop", action->rsc->id, value);
+ "allowed for stop", rsc->id, value);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
return NULL;
+ }
- } else if (pcmk__str_eq(action->task, PCMK_ACTION_DEMOTE, pcmk__str_casei)
- && (value == NULL)) {
- // demote on_fail defaults to monitor value for promoted role if present
- xmlNode *operation = NULL;
-
- CRM_CHECK(action->rsc != NULL, return NULL);
+ /* Demote actions default on-fail to the on-fail value for the first
+ * recurring monitor for the promoted role (if any).
+ */
+ if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
+ && (value == NULL)) {
- for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
- (operation != NULL) && (value == NULL);
- operation = pcmk__xe_next(operation)) {
+ /* @TODO This does not consider promote options set in a meta-attribute
+ * block (which may have rules that need to be evaluated) rather than
+ * XML properties.
+ */
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
+ const char *promote_on_fail = NULL;
- if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ /* We only care about an explicit on-fail (if the promoted-role monitor
+ * uses the default, demote can too)
+ */
+ promote_on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
+ if (promote_on_fail == NULL) {
continue;
}
+
+ // We only care about recurring monitors for the promoted role
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
- on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!on_fail) {
- continue;
- } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) {
+ if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
continue;
- } else if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_casei)
- || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
- PCMK__ROLE_PROMOTED_LEGACY,
- NULL)) {
+ }
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
- } else if (crm_parse_interval_spec(interval_spec) == 0) {
+ }
+
+ // We only care about enabled monitors
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
continue;
- } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
+ }
+
+ // Demote actions can't default to on-fail="demote"
+ if (pcmk__str_eq(promote_on_fail, "demote", pcmk__str_casei)) {
continue;
}
- value = on_fail;
+ // Use the on-fail value from the first applicable promoted-role monitor found
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup(promote_on_fail);
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
+ return g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
}
- } else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE,
- pcmk__str_casei)) {
- value = "ignore";
+ return NULL;
+ }
- } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- name = crm_element_value(action->op_entry, "name");
- role = crm_element_value(action->op_entry, "role");
- interval_spec = crm_element_value(action->op_entry,
+ if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
+ && !pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup("ignore");
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
+ return g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+ }
+
+ // on-fail="demote" is allowed only for certain actions
+ if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ name = crm_element_value(action_config, "name");
+ role = crm_element_value(action_config, "role");
+ interval_spec = crm_element_value(action_config,
XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_casei)
- && (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_casei)
+ if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ && (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
|| !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
PCMK__ROLE_PROMOTED_LEGACY, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
- "allowed for it", action->rsc->id, name);
+ "allowed for it", rsc->id, name);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
return NULL;
}
}
return value;
}
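/*!
* \internal
* \brief Parse an action timeout, falling back to the default
*
* \param[in] value Timeout specification to parse
*
* \return Parsed timeout in milliseconds, or PCMK_DEFAULT_ACTION_TIMEOUT_MS
*         if \p value is missing or invalid
*/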
static int
unpack_timeout(const char *value)
{
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
return timeout_ms;
}
// true if value contains valid, non-NULL interval origin for recurring op
static bool
unpack_interval_origin(const char *value, const xmlNode *xml_obj,
guint interval_ms, const crm_time_t *now,
long long *start_delay)
{
long long result = 0;
guint interval_sec = interval_ms / 1000;
crm_time_t *origin = NULL;
// Ignore unspecified values and non-recurring operations
if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
return false;
}
// Parse interval origin from text
origin = crm_time_new(value);
if (origin == NULL) {
pcmk__config_err("Ignoring '" XML_OP_ATTR_ORIGIN "' for operation "
"'%s' because '%s' is not valid",
(ID(xml_obj)? ID(xml_obj) : "(missing ID)"), value);
return false;
}
// Get seconds since origin (negative if origin is in the future)
result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
crm_time_free(origin);
// Calculate seconds from closest interval to now
result = result % interval_sec;
// Calculate seconds remaining until next interval
result = ((result <= 0)? 0 : interval_sec) - result;
crm_info("Calculated a start delay of %llds for operation '%s'",
result,
(ID(xml_obj)? ID(xml_obj) : "(unspecified)"));
if (start_delay != NULL) {
*start_delay = result * 1000; // milliseconds
}
return true;
}
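/*!
* \internal
* \brief Parse a start delay, normalizing it to non-negative milliseconds
*
* \param[in]     value Start delay specification to parse (may be NULL)
* \param[in,out] meta  If not NULL, table to update with normalized value
*
* \return Start delay in milliseconds (0 if \p value is NULL or negative)
*/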
static int
unpack_start_delay(const char *value, GHashTable *meta)
{
int start_delay = 0;
if (value != NULL) {
start_delay = crm_get_msec(value);
if (start_delay < 0) {
start_delay = 0;
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY),
pcmk__itoa(start_delay));
}
}
return start_delay;
}
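/*!
* \internal
* \brief Find a resource's recurring monitor with the smallest interval
*
* \param[in] rsc              Resource whose operations should be searched
* \param[in] include_disabled If TRUE, consider disabled monitors too
*
* \return XML of the recurring monitor with the smallest nonzero interval
*         (or NULL if none is configured)
*/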
static xmlNode *
find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
{
guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
const char *name = NULL;
const char *interval_spec = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
for (operation = pcmk__xe_first_child(rsc->ops_xml);
operation != NULL;
operation = pcmk__xe_next(operation)) {
if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
bool enabled = false;
name = crm_element_value(operation, "name");
interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
!enabled) {
continue;
}
if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
continue;
}
interval_ms = crm_parse_interval_spec(interval_spec);
if (interval_ms && (interval_ms < min_interval_ms)) {
min_interval_ms = interval_ms;
op = operation;
}
}
}
return op;
}
/*!
- * \brief Unpack operation XML into an action structure
+ * \internal
+ * \brief Unpack action configuration
*
- * Unpack an operation's meta-attributes (normalizing the interval, timeout,
- * and start delay values as integer milliseconds), requirements, and
- * failure policy.
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds), requirements, and
+ * failure policy from its CIB XML configuration (including defaults).
*
- * \param[in,out] action Action to unpack into
- * \param[in] xml_obj Operation XML (or NULL if all defaults)
+ * \param[in,out] action Resource action to unpack into
+ * \param[in] xml_obj Action configuration XML (NULL for defaults only)
* \param[in] container Resource that contains affected resource, if any
- * \param[in,out] data_set Cluster state
* \param[in] interval_ms How frequently to perform the operation
*/
static void
unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms)
+ const pe_resource_t *container, guint interval_ms)
{
int timeout_ms = 0;
const char *value = NULL;
bool is_probe = false;
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
.provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
.agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
};
pe_op_eval_data_t op_rule_data = {
.op_name = action->task,
.interval = interval_ms
};
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
- .now = data_set->now,
+ .now = action->rsc->cluster->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = &op_rule_data
};
CRM_CHECK(action && action->rsc, return);
is_probe = pcmk_is_probe(action->task, interval_ms);
// Cluster-wide
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(action->rsc->cluster->op_defaults,
+ XML_TAG_META_SETS, &rule_data, action->meta,
+ NULL, FALSE, action->rsc->cluster);
// Determine probe default timeout differently
if (is_probe) {
xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
if (min_interval_mon) {
value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
if (value) {
crm_trace("\t%s: Setting default timeout to minimum-interval "
"monitor's timeout '%s'", action->uuid, value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
strdup(value));
}
}
}
if (xml_obj) {
xmlAttrPtr xIter = NULL;
// take precedence over defaults
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, TRUE, data_set);
+ action->meta, NULL, TRUE,
+ action->rsc->cluster);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
*/
for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = pcmk__xml_attr_value(xIter);
g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
}
}
g_hash_table_remove(action->meta, "id");
// Normalize interval to milliseconds
if (interval_ms > 0) {
g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
crm_strdup_printf("%u", interval_ms));
} else {
g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
}
/*
* Timeout order of precedence:
* 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
* and task is start or a probe; pcmk_monitor_timeout works
* by default for a recurring monitor)
* 2. explicit op timeout on the primitive
* 3. default op timeout
* a. if probe, then min-interval monitor's timeout
* b. else, in XML_CIB_TAG_OPCONFIG
* 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
*
* #1 overrides general rule of XML property having highest
* precedence.
*/
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
&& (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)
|| is_probe)) {
- GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
+ GHashTable *params = pe_rsc_params(action->rsc, action->node,
+ action->rsc->cluster);
value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
if (value) {
crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
"overriding default", action->uuid, value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
strdup(value));
}
}
// Normalize timeout to positive milliseconds
value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
timeout_ms = unpack_timeout(value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
pcmk__itoa(timeout_ms));
if (!pcmk__strcase_any_of(action->task, PCMK_ACTION_START,
PCMK_ACTION_PROMOTE, NULL)) {
action->needs = pcmk_requires_nothing;
value = "nothing (not start or promote)";
} else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
action->needs = pcmk_requires_fencing;
value = "fencing";
} else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
action->needs = pcmk_requires_quorum;
value = "quorum";
} else {
action->needs = pcmk_requires_nothing;
value = "nothing";
}
pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
- value = unpack_operation_on_fail(action);
+ value = validate_on_fail(action->rsc, action->task, xml_obj, action->meta);
if (value == NULL) {
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_block;
- g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
- value = "block"; // The above could destroy the original string
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_fence_node;
value = "node fencing";
- if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (!pcmk_is_set(action->rsc->cluster->flags,
+ pe_flag_stonith_enabled)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
"operation '%s' to 'stop' because 'fence' is not "
"valid when fencing is disabled", action->uuid);
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_standby_node;
value = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
NULL)) {
action->on_fail = pcmk_on_fail_ignore;
value = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_ban;
value = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_restart;
value = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
if (container) {
action->on_fail = pcmk_on_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
action->on_fail = pcmk_on_fail_demote;
value = "demote instance";
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = pcmk_on_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* For remote nodes, ensure that any failure that results in dropping an
* active connection to the node results in fencing of the node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does not already
* exist. The user can set op on-fail=fence if they really want to fence start
* failures. */
} else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
- && pe__resource_is_remote_conn(action->rsc, data_set)
+ && pe__resource_is_remote_conn(action->rsc)
&& !(pcmk__str_eq(action->task, PCMK_ACTION_MONITOR,
pcmk__str_casei)
&& (interval_ms == 0))
&& !pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
action->on_fail = pcmk_on_fail_stop;
action->fail_role = pcmk_role_stopped;
value = "stop unmanaged remote node (enforcing default)";
} else {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(action->rsc->cluster->flags,
+ pe_flag_stonith_enabled)) {
value = "fence remote node (default)";
} else {
value = "recover remote node connection (default)";
}
if (action->rsc->remote_reconnect_ms) {
action->fail_role = pcmk_role_stopped;
}
action->on_fail = pcmk_on_fail_reset_remote;
}
} else if ((value == NULL)
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP,
pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(action->rsc->cluster->flags, pe_flag_stonith_enabled)) {
action->on_fail = pcmk_on_fail_fence_node;
value = "resource fence (default)";
} else {
action->on_fail = pcmk_on_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = pcmk_on_fail_restart;
value = "restart (and possibly migrate) (default)";
}
pe_rsc_trace(action->rsc, "%s failure handling: %s",
action->uuid, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
if (value) {
pe_warn_once(pe_wo_role_after,
"Support for role_after_failure is deprecated and will be removed in a future release");
}
}
if (value != NULL && action->fail_role == pcmk_role_unknown) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == pcmk_role_unknown) {
if (pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
action->fail_role = pcmk_role_unpromoted;
} else {
action->fail_role = pcmk_role_started;
}
}
pe_rsc_trace(action->rsc, "%s failure results in: %s",
action->uuid, role2text(action->fail_role));
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
if (value) {
unpack_start_delay(value, action->meta);
} else {
long long start_delay = 0;
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
- if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
- &start_delay)) {
+ if (unpack_interval_origin(value, xml_obj, interval_ms,
+ action->rsc->cluster->now, &start_delay)) {
g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
crm_strdup_printf("%lld", start_delay));
}
}
}
/*!
* \brief Create or update an action object
*
* \param[in,out] rsc Resource that action is for (if any)
* \param[in,out] key Action key (must be non-NULL)
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
* \param[in] save_action Whether action should be recorded in transition graph
* \param[in,out] data_set Cluster working set
*
* \return Action object corresponding to arguments
* \note This function takes ownership of (and might free) \p key. If
* \p save_action is true, \p data_set will own the returned action,
* otherwise it is the caller's responsibility to free the return value
* with pe_free_action().
*/
pe_action_t *
custom_action(pe_resource_t *rsc, char *key, const char *task,
const pe_node_t *on_node, gboolean optional, gboolean save_action,
pe_working_set_t *data_set)
{
pe_action_t *action = NULL;
CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
if (save_action) {
action = find_existing_action(key, rsc, on_node, data_set);
}
if (action == NULL) {
action = new_action(key, task, rsc, on_node, optional, save_action,
data_set);
} else {
free(key);
}
update_action_optional(action, optional);
if (rsc != NULL) {
if (action->node != NULL) {
unpack_action_node_attributes(action, data_set);
}
update_resource_action_runnable(action, save_action, data_set);
if (save_action) {
update_resource_flags_for_action(rsc, action);
}
}
return action;
}
pe_action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
pe_action_t *op = lookup_singleton(data_set, name);
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
}
return op;
}
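/*!
* \internal
* \brief Recursively build a list of fencing devices that require or
*        provide unfencing
*
* \param[in]     candidates Resources to search (including their children)
* \param[in,out] matches    Existing list to prepend matches to (may be NULL)
*
* \return Updated list including any matches among \p candidates
*/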
static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
pe_resource_t *candidate = gIter->data;
if (candidate->children != NULL) {
matches = find_unfencing_devices(candidate->children, matches);
} else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
continue;
} else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) {
matches = g_list_prepend(matches, candidate);
} else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
PCMK_STONITH_PROVIDES),
PCMK__VALUE_UNFENCING,
pcmk__str_casei)) {
matches = g_list_prepend(matches, candidate);
}
}
return matches;
}
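/*!
* \internal
* \brief Calculate the priority fencing delay for a fencing target
*
* \param[in] node     Node being fenced
* \param[in] data_set Cluster working set
*
* \return Configured priority-fencing-delay if all conditions for applying
*         it to \p node are met, otherwise 0
*/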
static int
node_priority_fencing_delay(const pe_node_t *node,
const pe_working_set_t *data_set)
{
int member_count = 0;
int online_count = 0;
int top_priority = 0;
int lowest_priority = 0;
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
if (data_set->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
if (node->details->type != node_member) {
return 0;
}
// No need to request a delay if the fencing target is in our partition
if (node->details->online) {
return 0;
}
for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *n = gIter->data;
if (n->details->type != node_member) {
continue;
}
member_count ++;
if (n->details->online) {
online_count++;
}
if (member_count == 1
|| n->details->priority > top_priority) {
top_priority = n->details->priority;
}
if (member_count == 1
|| n->details->priority < lowest_priority) {
lowest_priority = n->details->priority;
}
}
// No need to delay if more than half of the cluster members are online
if (online_count > member_count / 2) {
return 0;
}
/* All the nodes have equal priority.
* Any configured `pcmk_delay_base/max` will be applied. */
if (lowest_priority == top_priority) {
return 0;
}
if (node->details->priority < top_priority) {
return 0;
}
return data_set->priority_fencing_delay;
}
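/*!
* \brief Create or update a fencing action for a node
*
* \param[in,out] node           Node to fence
* \param[in]     op             Fence action name (NULL for cluster default)
* \param[in]     optional       Whether the action should be optional
* \param[in]     reason         Reason for the fencing, if known
* \param[in]     priority_delay Whether priority-fencing-delay may apply
* \param[in,out] data_set       Cluster working set
*
* \return Existing or newly created fencing action for \p node
*/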
pe_action_t *
pe_fence_op(pe_node_t *node, const char *op, bool optional,
const char *reason, bool priority_delay, pe_working_set_t *data_set)
{
char *op_key = NULL;
pe_action_t *stonith_op = NULL;
if(op == NULL) {
op = data_set->stonith_action;
}
op_key = crm_strdup_printf("%s-%s-%s",
PCMK_ACTION_STONITH, node->details->uname, op);
stonith_op = lookup_singleton(data_set, op_key);
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
TRUE, TRUE, data_set);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* Extra work to detect device changes
*/
GString *digests_all = g_string_sized_new(1024);
GString *digests_secure = g_string_sized_new(1024);
GList *matches = find_unfencing_devices(data_set->resources, NULL);
char *key = NULL;
char *value = NULL;
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
pe_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
data = pe__compare_fencing_digest(match, agent, node, data_set);
if(data->rc == RSC_DIGEST_ALL) {
optional = FALSE;
crm_notice("Unfencing node %s because the definition of "
"%s changed", pe__node_name(node), match->id);
if (!pcmk__is_daemon && data_set->priv != NULL) {
pcmk__output_t *out = data_set->priv;
out->info(out,
"notice: Unfencing node %s because the "
"definition of %s changed",
pe__node_name(node), match->id);
}
}
pcmk__g_strcat(digests_all,
match->id, ":", agent, ":",
data->digest_all_calc, ",", NULL);
pcmk__g_strcat(digests_secure,
match->id, ":", agent, ":",
data->digest_secure_calc, ",", NULL);
}
key = strdup(XML_OP_ATTR_DIGESTS_ALL);
value = strdup((const char *) digests_all->str);
CRM_ASSERT((key != NULL) && (value != NULL));
g_hash_table_insert(stonith_op->meta, key, value);
g_string_free(digests_all, TRUE);
key = strdup(XML_OP_ATTR_DIGESTS_SECURE);
value = strdup((const char *) digests_secure->str);
CRM_ASSERT((key != NULL) && (value != NULL));
g_hash_table_insert(stonith_op->meta, key, value);
g_string_free(digests_secure, TRUE);
}
} else {
free(op_key);
}
if (data_set->priority_fencing_delay > 0
/* This is a case where `priority-fencing-delay` applies.
* At least add the `priority-fencing-delay` field as an indicator. */
&& (priority_delay
/* The priority delay needs to be recalculated if this function has
* been called by schedule_fencing_and_shutdowns() after node
* priority has already been calculated by native_add_running().
*/
|| g_hash_table_lookup(stonith_op->meta,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY) != NULL)) {
/* Add `priority-fencing-delay` to the fencing op even if it's 0 for
* the target node, so that it takes precedence over any possible
* `pcmk_delay_base/max`.
*/
char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
if(optional == FALSE && pe_can_fence(data_set, node)) {
pe__clear_action_flags(stonith_op, pe_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
stonith_op->reason = strdup(reason);
}
return stonith_op;
}
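/*!
* \brief Free an action object and all memory it owns
*
* \param[in,out] action Action to free (may be NULL)
*/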
void
pe_free_action(pe_action_t * action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
free(action->cancel_task);
free(action->reason);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
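/*!
* \brief Get a resource's configured timeout for an action, in milliseconds
*
* The explicit timeout from the action's configuration is preferred, then
* any timeout from operation defaults, then the built-in default.
*
* \param[in,out] rsc      Resource the action is for
* \param[in]     action   Action name to look up
* \param[in,out] data_set Cluster working set
*
* \return Timeout in milliseconds
*/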
int
pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
const char *timeout_spec = NULL;
int timeout_ms = 0;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = pcmk_role_unknown,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (pcmk__str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME),
pcmk__str_casei)) {
timeout_spec = crm_element_value(child, XML_ATTR_TIMEOUT);
break;
}
}
if (timeout_spec == NULL && data_set->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
&rule_data, action_meta, NULL, FALSE, data_set);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
// @TODO check meta-attributes
// @TODO maybe use min-interval monitor timeout as default for monitors
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
if (action_meta != NULL) {
g_hash_table_destroy(action_meta);
}
return timeout_ms;
}
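/*!
* \brief Parse an action name, folding "done" pseudo-actions for primitives
*
* For primitive resources, map the stopped, started, demoted, and promoted
* pseudo-action names back to their atomic counterparts.
*
* \param[in] rsc  Resource the action is for (if any)
* \param[in] name Action name to parse
*
* \return Parsed action, adjusted for the resource variant
*/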
enum action_tasks
get_complex_task(const pe_resource_t *rsc, const char *name)
{
enum action_tasks task = text2task(name);
if ((rsc != NULL) && (rsc->variant == pe_native)) {
switch (task) {
case pcmk_action_stopped:
case pcmk_action_started:
case pcmk_action_demoted:
case pcmk_action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s",
name, rsc->id);
--task;
break;
default:
break;
}
}
return task;
}
/*!
* \internal
* \brief Find first matching action in a list
*
* \param[in] input List of actions to search
* \param[in] uuid If not NULL, action must have this UUID
* \param[in] task If not NULL, action must have this action name
* \param[in] on_node If not NULL, action must be on this node
*
* \return First action in list that matches criteria, or NULL if none
*/
pe_action_t *
find_first_action(const GList *input, const char *uuid, const char *task,
const pe_node_t *on_node)
{
CRM_CHECK(uuid || task, return NULL);
for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
} else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
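/*!
* \brief Find all actions in a list matching a key, optionally on a node
*
* \param[in,out] input   List of actions to search
* \param[in]     key     Action UUID to match (must be non-NULL)
* \param[in]     on_node If not NULL, match only actions on this node
*
* \return List of matching actions (or NULL if none)
* \note If \p on_node is not NULL, matching actions that have no node yet
*       are assigned to a copy of it.
*/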
GList *
find_actions(GList *input, const char *key, const pe_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, pe__node_name(on_node));
action->node = pe__copy_node(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
result = g_list_prepend(result, action);
}
}
return result;
}
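/*!
* \brief Find all actions in a list matching a key on a specific node
*
* Unlike find_actions(), this requires \p on_node and never assigns a node
* to actions that lack one.
*
* \param[in] input   List of actions to search
* \param[in] key     Action UUID to match (must be non-NULL)
* \param[in] on_node Node to match (must be non-NULL)
*
* \return List of matching actions (or NULL if none)
*/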
GList *
find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
{
GList *result = NULL;
CRM_CHECK(key != NULL, return NULL);
if (on_node == NULL) {
return NULL;
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
pe_action_t *action = (pe_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
&& pcmk__str_eq(on_node->details->id, action->node->details->id,
pcmk__str_casei)) {
crm_trace("Action %s on %s matches", key, pe__node_name(on_node));
result = g_list_prepend(result, action);
}
}
return result;
}
/*!
* \brief Find all actions of given type for a resource
*
* \param[in] rsc Resource to search
* \param[in] node Find only actions scheduled on this node
* \param[in] task Action name to search for
* \param[in] require_node If TRUE, a NULL \p node or an action without a node will not match
*
* \return List of actions found (or NULL if none)
* \note If node is not NULL and require_node is FALSE, matching actions
* without a node will be assigned to node.
*/
GList *
pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
char *key = pcmk__op_key(rsc->id, task, 0);
if (require_node) {
result = find_actions_exact(rsc->actions, key, node);
} else {
result = find_actions(rsc->actions, key, node);
}
free(key);
return result;
}
/*!
* \internal
* \brief Create an action reason string based on the action itself
*
* \param[in] action Action to create reason string for
* \param[in] flag Action flag that was cleared
*
* \return Newly allocated string suitable for use as action reason
* \note It is the caller's responsibility to free() the result.
*/
char *
pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
case pe_action_runnable:
change = "unrunnable";
break;
case pe_action_migrate_runnable:
change = "unmigrateable";
break;
case pe_action_optional:
change = "required";
break;
default:
// Bug: caller passed unsupported flag
CRM_CHECK(change != NULL, change = "");
break;
}
return crm_strdup_printf("%s%s%s %s", change,
(action->rsc == NULL)? "" : " ",
(action->rsc == NULL)? "" : action->rsc->id,
action->task);
}
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
action->uuid, action->reason, pcmk__s(reason, "(none)"));
} else if (action->reason == NULL) {
pe_rsc_trace(action->rsc, "Set %s reason to '%s'",
action->uuid, pcmk__s(reason, "(none)"));
} else {
// crm_assert(action->reason != NULL && !overwrite);
return;
}
pcmk__str_update(&action->reason, reason);
}
/*!
* \internal
* \brief Create an action to clear a resource's history from CIB
*
* \param[in,out] rsc Resource to clear
* \param[in] node Node to clear history on
* \param[in,out] data_set Cluster working set
*
* \return New action to clear resource history
*/
pe_action_t *
pe__clear_resource_history(pe_resource_t *rsc, const pe_node_t *node,
pe_working_set_t *data_set)
{
char *key = NULL;
CRM_ASSERT(rsc && node);
key = pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0);
return custom_action(rsc, key, PCMK_ACTION_LRM_DELETE, node, FALSE, TRUE,
data_set);
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
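/*!
* \internal
* \brief Compare the age of two operation history entries
*
* \param[in] xml_a             First operation history entry
* \param[in] xml_b             Second operation history entry
* \param[in] same_node_default Whether to assume the entries are for the
*                              same node when the XML doesn't say
*
* \return Negative if \p xml_a is older, positive if \p xml_a is newer,
*         otherwise 0
*/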
int
pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const char *a_xml_id = crm_element_value(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value(xml_b, XML_ATTR_ID);
const char *a_node = crm_element_value(xml_a, XML_LRM_ATTR_TARGET);
const char *b_node = crm_element_value(xml_b, XML_LRM_ATTR_TARGET);
bool same_node = true;
/* @COMPAT The on_node attribute was added to last_failure as of 1.1.13 (via
* 8b3ca1c) and the other entries as of 1.1.12 (via 0b07b5c).
*
* If any of the lrm_rsc_op entries lacks the on_node attribute, the
* caller must tell us explicitly whether the two operations are on the
* same node.
*/
if (a_node == NULL || b_node == NULL) {
same_node = same_node_default;
} else {
same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei);
}
if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) {
/* We have duplicate lrm_rsc_op entries in the status
* section, which is unlikely to be a good thing. We can
* handle it easily enough, but we need to get to the
* bottom of why it's happening.
*/
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
/* both are pending ops so it doesn't matter since
* stops are never pending
*/
sort_return(0, "pending");
} else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (a_call_id >= 0 && b_call_id >= 0
&& (!same_node || a_call_id == b_call_id)) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
time_t last_a = -1;
time_t last_b = -1;
crm_element_value_epoch(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_epoch(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %lld vs %lld",
(long long) last_a, (long long) last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
const char *a_magic = crm_element_value(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic a");
}
if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
NULL)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (e.g. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
/*
* Some of the logic in here may be redundant.
*
* If the UUID from the TE doesn't match, then one had better
* be a pending operation. Pending operations don't survive
* between elections and joins because we query the LRM
* directly.
*/
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
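// GCompareFunc wrapper for pe__is_newer_op(), assuming same node by default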
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
return pe__is_newer_op(xml_a, xml_b, true);
}
/*!
* \internal
* \brief Create a new pseudo-action for a resource
*
* \param[in,out] rsc Resource to create action for
* \param[in] task Action name
* \param[in] optional Whether action should be considered optional
* \param[in] runnable Whether action should be considered runnable
*
* \return New action object corresponding to arguments
*/
pe_action_t *
pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
bool runnable)
{
pe_action_t *action = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL));
action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
optional, TRUE, rsc->cluster);
pe__set_action_flags(action, pe_action_pseudo);
if (runnable) {
pe__set_action_flags(action, pe_action_runnable);
}
return action;
}
/*!
* \internal
* \brief Add the expected result to an action
*
* \param[in,out] action Action to add expected result to
* \param[in] expected_result Expected result to add
*
* \note This is more efficient than calling add_hash_param().
*/
void
pe__add_action_expected_result(pe_action_t *action, int expected_result)
{
char *name = NULL;
CRM_ASSERT((action != NULL) && (action->meta != NULL));
name = strdup(XML_ATTR_TE_TARGET_RC);
CRM_ASSERT (name != NULL);
g_hash_table_insert(action->meta, name, pcmk__itoa(expected_result));
}
diff --git a/lib/pengine/remote.c b/lib/pengine/remote.c
index 3eb71d1e98..b5f91a3daf 100644
--- a/lib/pengine/remote.c
+++ b/lib/pengine/remote.c
@@ -1,271 +1,270 @@
/*
* Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
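/*!
* \internal
* \brief Check whether a resource is a Pacemaker Remote connection resource
*
* \param[in] rsc Resource to check
*
* \return true if \p rsc is a remote connection for a known remote node
*/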
bool
-pe__resource_is_remote_conn(const pe_resource_t *rsc,
- const pe_working_set_t *data_set)
+pe__resource_is_remote_conn(const pe_resource_t *rsc)
{
return (rsc != NULL) && rsc->is_remote_node
- && pe__is_remote_node(pe_find_node(data_set->nodes, rsc->id));
+ && pe__is_remote_node(pe_find_node(rsc->cluster->nodes, rsc->id));
}
bool
pe__is_remote_node(const pe_node_t *node)
{
return (node != NULL) && (node->details->type == node_remote)
&& ((node->details->remote_rsc == NULL)
|| (node->details->remote_rsc->container == NULL));
}
bool
pe__is_guest_node(const pe_node_t *node)
{
return (node != NULL) && (node->details->type == node_remote)
&& (node->details->remote_rsc != NULL)
&& (node->details->remote_rsc->container != NULL);
}
bool
pe__is_guest_or_remote_node(const pe_node_t *node)
{
return (node != NULL) && (node->details->type == node_remote);
}
bool
pe__is_bundle_node(const pe_node_t *node)
{
return pe__is_guest_node(node)
&& pe_rsc_is_bundled(node->details->remote_rsc);
}
/*!
* \internal
* \brief Check whether a resource creates a guest node
*
* If a given resource contains a filler resource that is a remote connection,
* return that filler resource (or NULL if none is found).
*
* \param[in] data_set Working set of cluster
* \param[in] rsc Resource to check
*
* \return Filler resource with remote connection, or NULL if none found
*/
pe_resource_t *
pe__resource_contains_guest_node(const pe_working_set_t *data_set,
const pe_resource_t *rsc)
{
if ((rsc != NULL) && (data_set != NULL)
&& pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
for (GList *gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
pe_resource_t *filler = gIter->data;
if (filler->is_remote_node) {
return filler;
}
}
}
return NULL;
}
bool
xml_contains_remote_node(xmlNode *xml)
{
const char *value = NULL;
if (xml == NULL) {
return false;
}
value = crm_element_value(xml, XML_ATTR_TYPE);
if (!pcmk__str_eq(value, "remote", pcmk__str_casei)) {
return false;
}
value = crm_element_value(xml, XML_AGENT_ATTR_CLASS);
if (!pcmk__str_eq(value, PCMK_RESOURCE_CLASS_OCF, pcmk__str_casei)) {
return false;
}
value = crm_element_value(xml, XML_AGENT_ATTR_PROVIDER);
if (!pcmk__str_eq(value, "pacemaker", pcmk__str_casei)) {
return false;
}
return true;
}
/*!
* \internal
* \brief Execute a supplied function for each guest node running on a host
*
* \param[in] data_set Working set for cluster
* \param[in] host Host node to check
* \param[in] helper Function to call for each guest node
* \param[in,out] user_data Pointer to pass to helper function
*/
void
pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
void (*helper)(const pe_node_t*, void*), void *user_data)
{
GList *iter;
CRM_CHECK(data_set && host && host->details && helper, return);
if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
return;
}
for (iter = host->details->running_rsc; iter != NULL; iter = iter->next) {
pe_resource_t *rsc = (pe_resource_t *) iter->data;
if (rsc->is_remote_node && (rsc->container != NULL)) {
pe_node_t *guest_node = pe_find_node(data_set->nodes, rsc->id);
if (guest_node) {
(*helper)(guest_node, user_data);
}
}
}
}
/*!
* \internal
* \brief Create CIB XML for an implicit remote connection
*
* \param[in,out] parent If not NULL, use as parent XML element
* \param[in] uname Name of Pacemaker Remote node
* \param[in] container_id If not NULL, use this as connection container
* \param[in] migrateable If not NULL, use as allow-migrate value
* \param[in] is_managed If not NULL, use as is-managed value
* \param[in] start_timeout If not NULL, use as remote connect timeout
* \param[in] server If not NULL, use as remote server value
* \param[in] port If not NULL, use as remote port value
*
* \return Newly created XML
*/
xmlNode *
pe_create_remote_xml(xmlNode *parent, const char *uname,
const char *container_id, const char *migrateable,
const char *is_managed, const char *start_timeout,
const char *server, const char *port)
{
xmlNode *remote;
xmlNode *xml_sub;
remote = create_xml_node(parent, XML_CIB_TAG_RESOURCE);
// Add identity
crm_xml_add(remote, XML_ATTR_ID, uname);
crm_xml_add(remote, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
crm_xml_add(remote, XML_AGENT_ATTR_PROVIDER, "pacemaker");
crm_xml_add(remote, XML_ATTR_TYPE, "remote");
// Add meta-attributes
xml_sub = create_xml_node(remote, XML_TAG_META_SETS);
crm_xml_set_id(xml_sub, "%s-%s", uname, XML_TAG_META_SETS);
crm_create_nvpair_xml(xml_sub, NULL,
XML_RSC_ATTR_INTERNAL_RSC, XML_BOOLEAN_TRUE);
if (container_id) {
crm_create_nvpair_xml(xml_sub, NULL,
XML_RSC_ATTR_CONTAINER, container_id);
}
if (migrateable) {
crm_create_nvpair_xml(xml_sub, NULL,
XML_OP_ATTR_ALLOW_MIGRATE, migrateable);
}
if (is_managed) {
crm_create_nvpair_xml(xml_sub, NULL, XML_RSC_ATTR_MANAGED, is_managed);
}
// Add instance attributes
if (port || server) {
xml_sub = create_xml_node(remote, XML_TAG_ATTR_SETS);
crm_xml_set_id(xml_sub, "%s-%s", uname, XML_TAG_ATTR_SETS);
if (server) {
crm_create_nvpair_xml(xml_sub, NULL, XML_RSC_ATTR_REMOTE_RA_ADDR,
server);
}
if (port) {
crm_create_nvpair_xml(xml_sub, NULL, "port", port);
}
}
// Add operations
xml_sub = create_xml_node(remote, "operations");
crm_create_op_xml(xml_sub, uname, PCMK_ACTION_MONITOR, "30s", "30s");
if (start_timeout) {
crm_create_op_xml(xml_sub, uname, PCMK_ACTION_START, "0",
start_timeout);
}
return remote;
}
// History entry to be checked for fail count clearing
struct check_op {
const xmlNode *rsc_op; // History entry XML
pe_resource_t *rsc; // Known resource corresponding to history entry
pe_node_t *node; // Known node corresponding to history entry
enum pe_check_parameters check_type; // What needs checking
};
void
pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
pe_node_t *node, enum pe_check_parameters flag,
pe_working_set_t *data_set)
{
struct check_op *check_op = NULL;
CRM_CHECK(data_set && rsc_op && rsc && node, return);
check_op = calloc(1, sizeof(struct check_op));
CRM_ASSERT(check_op != NULL);
crm_trace("Deferring checks of %s until after allocation", ID(rsc_op));
check_op->rsc_op = rsc_op;
check_op->rsc = rsc;
check_op->node = node;
check_op->check_type = flag;
data_set->param_check = g_list_prepend(data_set->param_check, check_op);
}
/*!
* \internal
* \brief Call a function for each action to be checked for addr substitution
*
* \param[in,out] data_set Working set for cluster
* \param[in] cb Function to be called
*/
void
pe__foreach_param_check(pe_working_set_t *data_set,
void (*cb)(pe_resource_t*, pe_node_t*, const xmlNode*,
enum pe_check_parameters))
{
CRM_CHECK(data_set && cb, return);
for (GList *item = data_set->param_check; item != NULL; item = item->next) {
struct check_op *check_op = item->data;
cb(check_op->rsc, check_op->node, check_op->rsc_op,
check_op->check_type);
}
}
void
pe__free_param_checks(pe_working_set_t *data_set)
{
if (data_set && data_set->param_check) {
g_list_free_full(data_set->param_check, free);
data_set->param_check = NULL;
}
}
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index f2133d9d32..019725c36a 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2179 +1,2208 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
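/*!
* \internal
* \brief Build a list of node_info_t entries for a collective resource
*
* For each node that any child of \p rsc is running on, add an entry noting
* the node name and whether the child is promoted.
*
* \param[in] rsc Resource whose children should be checked
*
* \return Newly allocated list of node_info_t entries
*/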
static GList *
build_node_info_list(const pe_resource_t *rsc)
{
GList *retval = NULL;
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
const pe_resource_t *child = (const pe_resource_t *) iter->data;
for (const GList *iter2 = child->running_on;
iter2 != NULL; iter2 = iter2->next) {
const pe_node_t *node = (const pe_node_t *) iter2->data;
node_info_t *ni = calloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) &&
child->fns->state(child, TRUE) == pcmk_role_promoted;
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
GList *
cli_resource_search(pe_resource_t *rsc, const char *requested_name,
pe_working_set_t *data_set)
{
GList *retval = NULL;
const pe_resource_t *parent = pe__const_top_resource(rsc, false);
if (pe_rsc_is_clone(rsc)) {
retval = build_node_info_list(rsc);
/* The anonymous clone children's common ID is supplied */
} else if (pe_rsc_is_clone(parent)
&& !pcmk_is_set(rsc->flags, pe_rsc_unique)
&& rsc->clone_name
&& pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
&& !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
retval = build_node_info_list(parent);
} else if (rsc->running_on != NULL) {
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
pe_node_t *node = (pe_node_t *) iter->data;
node_info_t *ni = calloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted);
retval = g_list_prepend(retval, ni);
}
}
return retval;
}
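/* Query the CIB for a resource attribute (nvpair); if multiple nvpairs
* match, list them all and return ENOTUNIQ, otherwise optionally return
* the value of the unique match
*/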
// \return Standard Pacemaker return code
static int
find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
const char *rsc, const char *attr_set_type, const char *set_name,
const char *attr_id, const char *attr_name, char **value)
{
int rc = pcmk_rc_ok;
xmlNode *xml_search = NULL;
GString *xpath = NULL;
const char *xpath_base = NULL;
if(value) {
*value = NULL;
}
if(the_cib == NULL) {
return ENOTCONN;
}
xpath_base = pcmk_cib_xpath_for(XML_CIB_TAG_RESOURCES);
if (xpath_base == NULL) {
crm_err(XML_CIB_TAG_RESOURCES " CIB element not known (bug?)");
return ENOMSG;
}
xpath = g_string_sized_new(1024);
pcmk__g_strcat(xpath,
xpath_base, "//*[@" XML_ATTR_ID "=\"", rsc, "\"]", NULL);
if (attr_set_type != NULL) {
pcmk__g_strcat(xpath, "/", attr_set_type, NULL);
if (set_name != NULL) {
pcmk__g_strcat(xpath, "[@" XML_ATTR_ID "=\"", set_name, "\"]",
NULL);
}
}
g_string_append(xpath, "//" XML_CIB_TAG_NVPAIR "[");
if (attr_id != NULL) {
pcmk__g_strcat(xpath, "@" XML_ATTR_ID "=\"", attr_id, "\"", NULL);
}
if (attr_name != NULL) {
if (attr_id != NULL) {
g_string_append(xpath, " and ");
}
pcmk__g_strcat(xpath, "@" XML_NVPAIR_ATTR_NAME "=\"", attr_name, "\"",
NULL);
}
g_string_append_c(xpath, ']');
rc = the_cib->cmds->query(the_cib, (const char *) xpath->str, &xml_search,
cib_sync_call | cib_scope_local | cib_xpath);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
goto done;
}
crm_log_xml_debug(xml_search, "Match");
if (xml_search->children != NULL) {
xmlNode *child = NULL;
rc = ENOTUNIQ;
out->info(out, "Multiple attributes match name=%s", attr_name);
for (child = pcmk__xml_first_child(xml_search); child != NULL;
child = pcmk__xml_next(child)) {
out->info(out, " Value: %s \t(id=%s)",
crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
}
out->spacer(out);
} else if(value) {
pcmk__str_update(value, crm_element_value(xml_search, attr));
}
done:
g_string_free(xpath, TRUE);
free_xml(xml_search);
return rc;
}
/* PRIVATE. Use the find_matching_attr_resources instead. */
static void
find_matching_attr_resources_recursive(pcmk__output_t *out, GList/*<pe_resource_t*>*/ ** result,
pe_resource_t * rsc, const char * rsc_id,
const char * attr_set, const char * attr_set_type,
const char * attr_id, const char * attr_name,
cib_t * cib, const char * cmd, int depth)
{
int rc = pcmk_rc_ok;
char *lookup_id = clone_strip(rsc->id);
char *local_attr_id = NULL;
/* visit the children */
for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data,
rsc_id, attr_set, attr_set_type,
attr_id, attr_name, cib, cmd, depth+1);
/* do it only once for clones */
if(pe_clone == rsc->variant) {
break;
}
}
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
/* Post-order traversal.
* The root is always on the list and it is the last item. */
if((0 == depth) || (pcmk_rc_ok == rc)) {
/* push the head */
*result = g_list_append(*result, rsc);
}
free(local_attr_id);
free(lookup_id);
}
/* The result is a linearized pre-ordered tree of resources. */
static GList/*<pe_resource_t*>*/ *
find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
const char * rsc_id, const char * attr_set,
const char * attr_set_type, const char * attr_id,
const char * attr_name, cib_t * cib, const char * cmd,
gboolean force)
{
int rc = pcmk_rc_ok;
char *lookup_id = NULL;
char *local_attr_id = NULL;
GList * result = NULL;
/* If --force is used, update only the requested resource (clone or primitive).
* Otherwise, if the primitive has the attribute, use that.
* Otherwise use the clone. */
if(force == TRUE) {
return g_list_append(result, rsc);
}
if(rsc->parent && pe_clone == rsc->parent->variant) {
int rc = pcmk_rc_ok;
char *local_attr_id = NULL;
rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
free(local_attr_id);
if(rc != pcmk_rc_ok) {
rsc = rsc->parent;
out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
cmd, attr_name, rsc->id, rsc_id);
}
return g_list_append(result, rsc);
} else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) {
pe_resource_t *child = rsc->children->data;
if(child->variant == pe_native) {
lookup_id = clone_strip(child->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
if(rc == pcmk_rc_ok) {
rsc = child;
out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
attr_name, lookup_id, cmd, rsc_id);
}
free(local_attr_id);
free(lookup_id);
}
return g_list_append(result, rsc);
}
/* If the resource is a group ==> children inherit the attribute if defined. */
find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set,
attr_set_type, attr_id, attr_name,
cib, cmd, 0);
return result;
}
// \return Standard Pacemaker return code
int
cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive,
cib_t *cib, int cib_options, gboolean force)
{
pcmk__output_t *out = rsc->cluster->priv;
int rc = pcmk_rc_ok;
char *found_attr_id = NULL;
GList/*<pe_resource_t*>*/ *resources = NULL;
const char *top_id = pe__const_top_resource(rsc, false)->id;
if ((attr_id == NULL) && !force) {
find_resource_attr(out, cib, XML_ATTR_ID, top_id, NULL, NULL, NULL,
attr_name, NULL);
}
if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
if (!force) {
rc = find_resource_attr(out, cib, XML_ATTR_ID, top_id,
XML_TAG_META_SETS, attr_set, attr_id,
attr_name, &found_attr_id);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
out->err(out,
"WARNING: There is already a meta attribute "
"for '%s' called '%s' (id=%s)",
top_id, attr_name, found_attr_id);
out->err(out,
" Delete '%s' first or use the force option "
"to override", found_attr_id);
}
free(found_attr_id);
if (rc == pcmk_rc_ok) {
return ENOTUNIQ;
}
}
resources = g_list_append(resources, rsc);
} else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
crm_xml_add(rsc->xml, attr_name, attr_value);
CRM_ASSERT(cib != NULL);
rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml,
cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Set attribute: name=%s value=%s",
attr_name, attr_value);
}
return rc;
} else {
resources = find_matching_attr_resources(out, rsc, requested_name,
attr_set, attr_set_type,
attr_id, attr_name, cib,
"update", force);
}
/* If the user specified attr_set or attr_id, the intent is to modify a
* single resource, which will be the last item in the list.
*/
if ((attr_set != NULL) || (attr_id != NULL)) {
GList *last = g_list_last(resources);
resources = g_list_remove_link(resources, last);
g_list_free(resources);
resources = last;
}
for (GList *iter = resources; iter != NULL; iter = iter->next) {
char *lookup_id = NULL;
char *local_attr_set = NULL;
const char *rsc_attr_id = attr_id;
const char *rsc_attr_set = attr_set;
xmlNode *xml_top = NULL;
xmlNode *xml_obj = NULL;
found_attr_id = NULL;
rsc = (pe_resource_t *) iter->data;
lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &found_attr_id);
switch (rc) {
case pcmk_rc_ok:
crm_debug("Found a match for name=%s: id=%s",
attr_name, found_attr_id);
rsc_attr_id = found_attr_id;
break;
case ENXIO:
if (rsc_attr_set == NULL) {
local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
attr_set_type);
rsc_attr_set = local_attr_set;
}
if (rsc_attr_id == NULL) {
found_attr_id = crm_strdup_printf("%s-%s",
rsc_attr_set, attr_name);
rsc_attr_id = found_attr_id;
}
xml_top = create_xml_node(NULL, (const char *) rsc->xml->name);
crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
xml_obj = create_xml_node(xml_top, attr_set_type);
crm_xml_add(xml_obj, XML_ATTR_ID, rsc_attr_set);
break;
default:
free(lookup_id);
free(found_attr_id);
g_list_free(resources);
return rc;
}
xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name,
attr_value);
if (xml_top == NULL) {
xml_top = xml_obj;
}
crm_log_xml_debug(xml_top, "Update");
rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top,
cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s",
lookup_id, found_attr_id,
((rsc_attr_set == NULL)? "" : " set="),
pcmk__s(rsc_attr_set, ""),
((attr_name == NULL)? "" : " name="),
pcmk__s(attr_name, ""), attr_value);
}
free_xml(xml_top);
free(lookup_id);
free(found_attr_id);
free(local_attr_set);
if (recursive
&& pcmk__str_eq(attr_set_type, XML_TAG_META_SETS,
pcmk__str_casei)) {
GList *lpc = NULL;
static bool need_init = true;
if (need_init) {
need_init = false;
pcmk__unpack_constraints(rsc->cluster);
pe__clear_resource_flags_on_all(rsc->cluster,
pe_rsc_detect_loop);
}
/* We want to set the attribute only on resources explicitly
* colocated with this one, so we use rsc->rsc_cons_lhs directly
* rather than the with_this_colocations() method.
*/
pe__set_resource_flags(rsc, pe_rsc_detect_loop);
for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
crm_debug("Checking %s %d", cons->id, cons->score);
if (!pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)
&& (cons->score > 0)) {
crm_debug("Setting %s=%s for dependent resource %s",
attr_name, attr_value, cons->dependent->id);
cli_resource_update_attribute(cons->dependent,
cons->dependent->id, NULL,
attr_set_type, NULL,
attr_name, attr_value,
recursive, cib, cib_options,
force);
}
}
}
}
g_list_free(resources);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
cib_t *cib, int cib_options, gboolean force)
{
pcmk__output_t *out = rsc->cluster->priv;
int rc = pcmk_rc_ok;
    GList *resources = NULL;
if ((attr_id == NULL) && !force) {
find_resource_attr(out, cib, XML_ATTR_ID,
pe__const_top_resource(rsc, false)->id, NULL,
NULL, NULL, attr_name, NULL);
}
if (pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
resources = find_matching_attr_resources(out, rsc, requested_name,
attr_set, attr_set_type,
attr_id, attr_name, cib,
"delete", force);
} else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
xml_remove_prop(rsc->xml, attr_name);
CRM_ASSERT(cib != NULL);
rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml,
cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Deleted attribute: %s", attr_name);
}
return rc;
} else {
resources = g_list_append(resources, rsc);
}
for (GList *iter = resources; iter != NULL; iter = iter->next) {
char *lookup_id = NULL;
xmlNode *xml_obj = NULL;
char *found_attr_id = NULL;
const char *rsc_attr_id = attr_id;
rsc = (pe_resource_t *) iter->data;
lookup_id = clone_strip(rsc->id);
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &found_attr_id);
switch (rc) {
case pcmk_rc_ok:
break;
case ENXIO:
free(lookup_id);
rc = pcmk_rc_ok;
continue;
default:
free(lookup_id);
g_list_free(resources);
return rc;
}
if (rsc_attr_id == NULL) {
rsc_attr_id = found_attr_id;
}
xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL);
crm_log_xml_debug(xml_obj, "Delete");
CRM_ASSERT(cib);
rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj,
cib_options);
rc = pcmk_legacy2rc(rc);
if (rc == pcmk_rc_ok) {
out->info(out, "Deleted '%s' option: id=%s%s%s%s%s",
lookup_id, found_attr_id,
((attr_set == NULL)? "" : " set="),
pcmk__s(attr_set, ""),
((attr_name == NULL)? "" : " name="),
pcmk__s(attr_name, ""));
}
free(lookup_id);
free_xml(xml_obj);
free(found_attr_id);
}
g_list_free(resources);
return rc;
}
// \return Standard Pacemaker return code
static int
send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
const char *host_uname, const char *rsc_id, pe_working_set_t *data_set)
{
pcmk__output_t *out = data_set->priv;
const char *router_node = host_uname;
const char *rsc_api_id = NULL;
const char *rsc_long_id = NULL;
const char *rsc_class = NULL;
const char *rsc_provider = NULL;
const char *rsc_type = NULL;
bool cib_only = false;
pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
if (rsc == NULL) {
out->err(out, "Resource %s not found", rsc_id);
return ENXIO;
} else if (rsc->variant != pe_native) {
out->err(out, "We can only process primitive resources, not %s", rsc_id);
return EINVAL;
}
rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
    rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
if ((rsc_class == NULL) || (rsc_type == NULL)) {
out->err(out, "Resource %s does not have a class and type", rsc_id);
return EINVAL;
}
{
pe_node_t *node = pe_find_node(data_set->nodes, host_uname);
if (node == NULL) {
out->err(out, "Node %s not found", host_uname);
return pcmk_rc_node_unknown;
}
if (!(node->details->online)) {
if (do_fail_resource) {
out->err(out, "Node %s is not online", host_uname);
return ENOTCONN;
} else {
cib_only = true;
}
}
if (!cib_only && pe__is_guest_or_remote_node(node)) {
node = pe__current_node(node->details->remote_rsc);
if (node == NULL) {
out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
host_uname);
return ENOTCONN;
}
router_node = node->details->uname;
}
}
if (rsc->clone_name) {
rsc_api_id = rsc->clone_name;
rsc_long_id = rsc->id;
} else {
rsc_api_id = rsc->id;
}
if (do_fail_resource) {
return pcmk_controld_api_fail(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id,
rsc_class, rsc_provider, rsc_type);
} else {
return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
rsc_api_id, rsc_long_id, rsc_class,
rsc_provider, rsc_type, cib_only);
}
}
/*!
* \internal
* \brief Get resource name as used in failure-related node attributes
*
* \param[in] rsc Resource to check
*
* \return Newly allocated string containing resource's fail name
* \note The caller is responsible for freeing the result.
*/
static inline char *
rsc_fail_name(const pe_resource_t *rsc)
{
const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
}
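/* Illustrative sketch (hypothetical ids): for an anonymous clone instance
 * "myclone:2", clone_strip() yields "myclone", while a globally unique clone
 * instance keeps "myclone:2" verbatim:
 *
 *     char *name = rsc_fail_name(rsc);
 *     // ... use name when constructing fail-count attribute names ...
 *     free(name);  // caller owns the returned string
 */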
// \return Standard Pacemaker return code
static int
clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pe_working_set_t *data_set)
{
int rc = pcmk_rc_ok;
/* Erase the resource's entire LRM history in the CIB, even if we're only
* clearing a single operation's fail count. If we erased only entries for a
* single operation, we might wind up with a wrong idea of the current
* resource state, and we might not re-probe the resource.
*/
rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, data_set);
if (rc != pcmk_rc_ok) {
return rc;
}
crm_trace("Processing %d mainloop inputs",
pcmk_controld_api_replies_expected(controld_api));
while (g_main_context_iteration(NULL, FALSE)) {
crm_trace("Processed mainloop input, %d still remaining",
pcmk_controld_api_replies_expected(controld_api));
}
return rc;
}
// \return Standard Pacemaker return code
static int
clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
const char *node_name, const char *rsc_id, const char *operation,
const char *interval_spec, pe_working_set_t *data_set)
{
int rc = pcmk_rc_ok;
const char *failed_value = NULL;
const char *failed_id = NULL;
    char *interval_ms_s = NULL;    // dynamically allocated, freed before return
GHashTable *rscs = NULL;
GHashTableIter iter;
/* Create a hash table to use as a set of resources to clean. This lets us
* clean each resource only once (per node) regardless of how many failed
* operations it has.
*/
rscs = pcmk__strkey_table(NULL, NULL);
// Normalize interval to milliseconds for comparison to history entry
if (operation) {
interval_ms_s = crm_strdup_printf("%u",
crm_parse_interval_spec(interval_spec));
}
for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed);
xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
if (failed_id == NULL) {
// Malformed history entry, should never happen
continue;
}
// No resource specified means all resources match
if (rsc_id) {
pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
failed_id,
pe_find_renamed|pe_find_anon);
if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
continue;
}
}
// Host name should always have been provided by this point
failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
continue;
}
// No operation specified means all operations match
if (operation) {
failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
continue;
}
// Interval (if operation was specified) defaults to 0 (not all)
failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
continue;
}
}
g_hash_table_add(rscs, (gpointer) failed_id);
}
g_hash_table_iter_init(&iter, rscs);
while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
crm_debug("Erasing failures of %s on %s", failed_id, node_name);
rc = clear_rsc_history(controld_api, node_name, failed_id, data_set);
        if (rc != pcmk_rc_ok) {
            break;  // exit the loop, but still free the table and string below
        }
    }
    g_hash_table_destroy(rscs);
    free(interval_ms_s);
    return rc;
}
// \return Standard Pacemaker return code
static int
clear_rsc_fail_attrs(const pe_resource_t *rsc, const char *operation,
const char *interval_spec, const pe_node_t *node)
{
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
char *rsc_name = rsc_fail_name(rsc);
if (pe__is_guest_or_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
rc = pcmk__attrd_api_clear_failures(NULL, node->details->uname, rsc_name,
operation, interval_spec, NULL,
attr_options);
free(rsc_name);
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
const pe_resource_t *rsc, const char *operation,
const char *interval_spec, bool just_failures,
pe_working_set_t *data_set, gboolean force)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
pe_node_t *node = NULL;
if (rsc == NULL) {
return ENXIO;
} else if (rsc->children) {
for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
const pe_resource_t *child = (const pe_resource_t *) lpc->data;
rc = cli_resource_delete(controld_api, host_uname, child, operation,
interval_spec, just_failures, data_set, force);
if (rc != pcmk_rc_ok) {
return rc;
}
}
return pcmk_rc_ok;
} else if (host_uname == NULL) {
GList *lpc = NULL;
GList *nodes = g_hash_table_get_values(rsc->known_on);
if(nodes == NULL && force) {
nodes = pcmk__copy_node_list(data_set->nodes, false);
} else if(nodes == NULL && rsc->exclusive_discover) {
GHashTableIter iter;
pe_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
if(node->weight >= 0) {
nodes = g_list_prepend(nodes, node);
}
}
} else if(nodes == NULL) {
nodes = g_hash_table_get_values(rsc->allowed_nodes);
}
for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
node = (pe_node_t *) lpc->data;
if (node->details->online) {
rc = cli_resource_delete(controld_api, node->details->uname, rsc,
operation, interval_spec, just_failures,
data_set, force);
}
if (rc != pcmk_rc_ok) {
g_list_free(nodes);
return rc;
}
}
g_list_free(nodes);
return pcmk_rc_ok;
}
node = pe_find_node(data_set->nodes, host_uname);
if (node == NULL) {
out->err(out, "Unable to clean up %s because node %s not found",
rsc->id, host_uname);
return ENODEV;
}
if (!node->details->rsc_discovery_enabled) {
out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
rsc->id, host_uname);
return EOPNOTSUPP;
}
if (controld_api == NULL) {
out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
rsc->id, host_uname);
return pcmk_rc_ok;
}
rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up %s failures on %s: %s",
rsc->id, host_uname, pcmk_rc_str(rc));
return rc;
}
if (just_failures) {
rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
interval_spec, data_set);
} else {
rc = clear_rsc_history(controld_api, host_uname, rsc->id, data_set);
}
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
rsc->id, host_uname, pcmk_strerror(rc));
} else {
out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
}
return rc;
}
// \return Standard Pacemaker return code
int
cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
const char *operation, const char *interval_spec,
pe_working_set_t *data_set)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
const char *display_name = node_name? node_name : "all nodes";
if (controld_api == NULL) {
out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
display_name);
return rc;
}
if (node_name) {
pe_node_t *node = pe_find_node(data_set->nodes, node_name);
if (node == NULL) {
out->err(out, "Unknown node: %s", node_name);
return ENXIO;
}
if (pe__is_guest_or_remote_node(node)) {
attr_options |= pcmk__node_attr_remote;
}
}
rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation,
interval_spec, NULL, attr_options);
if (rc != pcmk_rc_ok) {
out->err(out, "Unable to clean up all failures on %s: %s",
display_name, pcmk_rc_str(rc));
return rc;
}
if (node_name) {
rc = clear_rsc_failures(out, controld_api, node_name, NULL,
operation, interval_spec, data_set);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
node_name, pcmk_strerror(rc));
return rc;
}
} else {
for (GList *iter = data_set->nodes; iter; iter = iter->next) {
pe_node_t *node = (pe_node_t *) iter->data;
rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
operation, interval_spec, data_set);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
pcmk_strerror(rc));
return rc;
}
}
}
out->info(out, "Cleaned up all resources on %s", display_name);
return rc;
}
static void
check_role(resource_checks_t *checks)
{
const char *role_s = g_hash_table_lookup(checks->rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if (role_s == NULL) {
return;
}
switch (text2role(role_s)) {
case pcmk_role_stopped:
checks->flags |= rsc_remain_stopped;
break;
case pcmk_role_unpromoted:
if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
pe_rsc_promotable)) {
checks->flags |= rsc_unpromotable;
}
break;
default:
break;
}
}
static void
check_managed(resource_checks_t *checks)
{
const char *managed_s = g_hash_table_lookup(checks->rsc->meta,
XML_RSC_ATTR_MANAGED);
if ((managed_s != NULL) && !crm_is_true(managed_s)) {
checks->flags |= rsc_unmanaged;
}
}
static void
check_locked(resource_checks_t *checks)
{
if (checks->rsc->lock_node != NULL) {
checks->flags |= rsc_locked;
checks->lock_node = checks->rsc->lock_node->details->uname;
}
}
static bool
node_is_unhealthy(pe_node_t *node)
{
switch (pe__health_strategy(node->details->data_set)) {
case pcmk__health_strategy_none:
break;
case pcmk__health_strategy_no_red:
if (pe__node_health(node) < 0) {
return true;
}
break;
case pcmk__health_strategy_only_green:
if (pe__node_health(node) <= 0) {
return true;
}
break;
case pcmk__health_strategy_progressive:
case pcmk__health_strategy_custom:
/* @TODO These are finite scores, possibly with rules, and possibly
* combining with other scores, so attributing these as a cause is
* nontrivial.
*/
break;
}
return false;
}
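/* For example, under the no-red strategy a node counts as unhealthy only when
 * its aggregate health score is negative, while only-green also treats a
 * score of exactly 0 as unhealthy; progressive and custom strategies are
 * deliberately not attributed here (see the @TODO above).
 */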
static void
check_node_health(resource_checks_t *checks, pe_node_t *node)
{
if (node == NULL) {
GHashTableIter iter;
bool allowed = false;
bool all_nodes_unhealthy = true;
g_hash_table_iter_init(&iter, checks->rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
allowed = true;
if (!node_is_unhealthy(node)) {
all_nodes_unhealthy = false;
break;
}
}
if (allowed && all_nodes_unhealthy) {
checks->flags |= rsc_node_health;
}
} else if (node_is_unhealthy(node)) {
checks->flags |= rsc_node_health;
}
}
int
cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
{
resource_checks_t checks = { .rsc = rsc };
check_role(&checks);
check_managed(&checks);
check_locked(&checks);
check_node_health(&checks, node);
return out->message(out, "resource-check-list", &checks);
}
// \return Standard Pacemaker return code
int
cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
const char *rsc_id, pe_working_set_t *data_set)
{
crm_notice("Failing %s on %s", rsc_id, host_uname);
return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, data_set);
}
static GHashTable *
generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
GHashTable *params = NULL;
GHashTable *meta = NULL;
GHashTable *combined = NULL;
GHashTableIter iter;
char *key = NULL;
char *value = NULL;
combined = pcmk__strkey_table(free, free);
params = pe_rsc_params(rsc, node, data_set);
if (params != NULL) {
g_hash_table_iter_init(&iter, params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
g_hash_table_insert(combined, strdup(key), strdup(value));
}
}
meta = pcmk__strkey_table(free, free);
get_meta_attributes(meta, rsc, node, data_set);
if (meta != NULL) {
g_hash_table_iter_init(&iter, meta);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
char *crm_name = crm_meta_name(key);
g_hash_table_insert(combined, crm_name, strdup(value));
}
g_hash_table_destroy(meta);
}
return combined;
}
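/* Example: a meta-attribute named "target-role" lands in the combined table
 * as "CRM_meta_target_role" (crm_meta_name() adds the CRM_meta_ prefix and
 * maps '-' to '_'), while instance parameters are copied in verbatim.
 */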
bool resource_is_running_on(pe_resource_t *rsc, const char *host)
{
bool found = true;
GList *hIter = NULL;
GList *hosts = NULL;
if (rsc == NULL) {
return false;
}
rsc->fns->location(rsc, &hosts, TRUE);
for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
pe_node_t *node = (pe_node_t *) hIter->data;
if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) {
crm_trace("Resource %s is running on %s\n", rsc->id, host);
goto done;
}
}
    if (host != NULL) {
        crm_trace("Resource %s is not running on: %s", rsc->id, host);
        found = false;
    } else if (hosts == NULL) {  // then host is NULL, so not running anywhere
        crm_trace("Resource %s is not running", rsc->id);
        found = false;
    }
done:
g_list_free(hosts);
return found;
}
/*!
* \internal
* \brief Create a list of all resources active on host from a given list
*
* \param[in] host Name of host to check whether resources are active
* \param[in] rsc_list List of resources to check
*
* \return New list of resources from list that are active on host
*/
static GList *
get_active_resources(const char *host, GList *rsc_list)
{
GList *rIter = NULL;
GList *active = NULL;
for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
pe_resource_t *rsc = (pe_resource_t *) rIter->data;
/* Expand groups to their members, because if we're restarting a member
* other than the first, we can't otherwise tell which resources are
* stopping and starting.
*/
if (rsc->variant == pe_group) {
active = g_list_concat(active,
get_active_resources(host, rsc->children));
} else if (resource_is_running_on(rsc, host)) {
active = g_list_append(active, strdup(rsc->id));
}
}
return active;
}
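/* Example (hypothetical names): for a group "grp" whose members "m1" and "m2"
 * are active on "node1", get_active_resources("node1", ...) returns "m1" and
 * "m2" rather than "grp", so member-level stop/start deltas can be computed.
 */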
static void dump_list(GList *items, const char *tag)
{
int lpc = 0;
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
lpc++;
}
}
static void display_list(pcmk__output_t *out, GList *items, const char *tag)
{
GList *item = NULL;
for (item = items; item != NULL; item = item->next) {
out->info(out, "%s%s", tag, (const char *)item->data);
}
}
/*!
* \internal
* \brief Upgrade XML to latest schema version and use it as working set input
*
* This also updates the working set timestamp to the current time.
*
* \param[in,out] data_set Working set instance to update
* \param[in,out] xml XML to use as input
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
* data_set->now.
* \todo This follows the example of other callers of cli_config_update()
* and returns ENOKEY ("Required key not available") if that fails,
* but perhaps pcmk_rc_schema_validation would be better in that case.
*/
int
update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
{
if (cli_config_update(xml, NULL, FALSE) == FALSE) {
return ENOKEY;
}
data_set->input = *xml;
data_set->now = crm_time_new(NULL);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Update a working set's XML input based on a CIB query
*
* \param[in] data_set Data set instance to initialize
* \param[in] cib Connection to the CIB manager
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
* data_set->input and data_set->now.
*/
static int
update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
cib_t *cib)
{
xmlNode *cib_xml_copy = NULL;
int rc = pcmk_rc_ok;
rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc);
return rc;
}
rc = update_working_set_xml(data_set, &cib_xml_copy);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not upgrade the current CIB XML");
free_xml(cib_xml_copy);
return rc;
}
return rc;
}
// \return Standard Pacemaker return code
static int
update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
{
char *pid = NULL;
char *shadow_file = NULL;
cib_t *shadow_cib = NULL;
int rc = pcmk_rc_ok;
pcmk__output_t *out = data_set->priv;
pe_reset_working_set(data_set);
pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
rc = update_working_set_from_cib(out, data_set, cib);
if (rc != pcmk_rc_ok) {
return rc;
}
if(simulate) {
bool prev_quiet = false;
pid = pcmk__getpid_s();
shadow_cib = cib_shadow_new(pid);
shadow_file = get_shadow_file(pid);
if (shadow_cib == NULL) {
out->err(out, "Could not create shadow cib: '%s'", pid);
rc = ENXIO;
goto done;
}
rc = write_xml_file(data_set->input, shadow_file, FALSE);
if (rc < 0) {
out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc);
goto done;
}
rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc);
goto done;
}
pcmk__schedule_actions(data_set->input,
pe_flag_no_counts|pe_flag_no_compat, data_set);
prev_quiet = out->is_quiet(out);
out->quiet = true;
pcmk__simulate_transition(data_set, shadow_cib, NULL);
out->quiet = prev_quiet;
rc = update_dataset(shadow_cib, data_set, false);
} else {
cluster_status(data_set);
}
done:
/* Do not free data_set->input here, we need rsc->xml to be valid later on */
cib_delete(shadow_cib);
free(pid);
if(shadow_file) {
unlink(shadow_file);
free(shadow_file);
}
return rc;
}
+/*!
+ * \internal
+ * \brief Find the maximum stop timeout of a resource and its children (if any)
+ *
+ * \param[in,out] rsc Resource to get timeout for
+ *
+ * \return Maximum stop timeout for \p rsc (in milliseconds)
+ */
static int
-max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc)
+max_rsc_stop_timeout(pe_resource_t *rsc)
{
- int delay = 0;
+ pe_action_t *stop = NULL;
+ long long result_ll;
int max_delay = 0;
- if(rsc && rsc->children) {
- GList *iter = NULL;
+ if (rsc == NULL) {
+ return 0;
+ }
- for(iter = rsc->children; iter; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *)iter->data;
+ // If resource is collective, use maximum of its children's stop timeouts
+ if (rsc->children != NULL) {
+ for (GList *iter = rsc->children; iter; iter = iter->next) {
+ pe_resource_t *child = iter->data;
+ int delay = max_rsc_stop_timeout(child);
- delay = max_delay_for_resource(data_set, child);
- if(delay > max_delay) {
- double seconds = delay / 1000.0;
- crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
+ if (delay > max_delay) {
+ pe_rsc_trace(rsc,
+ "Maximum stop timeout for %s is now %s due to %s",
+ rsc->id, pcmk__readable_interval(delay), child->id);
max_delay = delay;
}
}
-
- } else if(rsc) {
- char *key = crm_strdup_printf("%s_%s_0", rsc->id, PCMK_ACTION_STOP);
- pe_action_t *stop = custom_action(rsc, key, PCMK_ACTION_STOP, NULL,
- TRUE, FALSE, data_set);
- const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
- long long result_ll;
-
- if ((pcmk__scan_ll(value, &result_ll, -1LL) == pcmk_rc_ok)
- && (result_ll >= 0) && (result_ll <= INT_MAX)) {
- max_delay = (int) result_ll;
- } else {
- max_delay = -1;
- }
- pe_free_action(stop);
+ return max_delay;
}
+ /* Create a (transient) instance of the resource's stop action, to fully
+ * evaluate its timeout for rules, defaults, etc.
+ *
+ * @TODO This currently ignores node (which might matter for rules)
+ */
+ stop = custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, NULL, TRUE,
+ FALSE, rsc->cluster);
+ if ((pcmk__scan_ll(g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT),
+ &result_ll, -1LL) == pcmk_rc_ok)
+ && (result_ll >= 0) && (result_ll <= INT_MAX)) {
+ max_delay = (int) result_ll;
+ }
+ pe_free_action(stop);
return max_delay;
}
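/* Usage sketch (hypothetical values): for a clone whose instances carry stop
 * timeouts of 20s and 90s, max_rsc_stop_timeout() returns 90000 (ms); for a
 * primitive without an explicit timeout, the value evaluated from the stop
 * action's meta-attributes (rules, defaults, etc.) is used instead.
 */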
+/*!
+ * \internal
+ * \brief Find a reasonable waiting time for stopping any one resource in a list
+ *
+ * \param[in,out] data_set Cluster working set
+ * \param[in] resources List of names of resources that will be stopped
+ *
+ * \return Rough estimate of a reasonable time to wait (in seconds) to stop any
+ * one resource in \p resources
+ * \note This estimate is very rough, simply the maximum stop timeout of all
+ * given resources and their children, plus a small fudge factor. It does
+ * not account for children that must be stopped in sequence, action
+ * throttling, or any demotions needed. It checks the stop timeout, even
+ * if the resources in question are actually being started.
+ */
static int
-max_delay_in(pe_working_set_t * data_set, GList *resources)
+wait_time_estimate(pe_working_set_t *data_set, const GList *resources)
{
int max_delay = 0;
- GList *item = NULL;
-
- for (item = resources; item != NULL; item = item->next) {
- int delay = 0;
- pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
- delay = max_delay_for_resource(data_set, rsc);
+ // Find maximum stop timeout in milliseconds
+ for (const GList *item = resources; item != NULL; item = item->next) {
+ pe_resource_t *rsc = pe_find_resource(data_set->resources,
+ (const char *) (item->data));
+ int delay = max_rsc_stop_timeout(rsc);
- if(delay > max_delay) {
- double seconds = delay / 1000.0;
- crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
+ if (delay > max_delay) {
+ pe_rsc_trace(rsc,
+ "Wait time is now %s due to %s",
+ pcmk__readable_interval(delay), rsc->id);
max_delay = delay;
}
}
- return 5 + (max_delay / 1000);
+ return (max_delay / 1000) + 5;
}
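/* Worked example: if the longest stop timeout among the listed resources is
 * 90000 ms, the estimate is (90000 / 1000) + 5 = 95 seconds.
 */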
#define waiting_for_starts(d, r, h) ((d != NULL) || \
(!resource_is_running_on((r), (h))))
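/* True while resources are still expected to start (delta list d non-empty)
 * or the restarted resource r is not yet running on host h
 */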
/*!
* \internal
* \brief Restart a resource (on a particular host if requested).
*
* \param[in,out] out Output object
* \param[in,out] rsc The resource to restart
* \param[in] node Node to restart resource on (NULL for all)
* \param[in] move_lifetime If not NULL, how long constraint should
* remain in effect (as ISO 8601 string)
* \param[in] timeout_ms Consider failed if actions do not complete
* in this time (specified in milliseconds,
* but a two-second granularity is actually
* used; if 0, it will be calculated based on
* the resource timeout)
* \param[in,out] cib Connection to the CIB manager
* \param[in] cib_options Group of enum cib_call_options flags to
* use with CIB calls
* \param[in] promoted_role_only If true, limit to promoted instances
* \param[in] force If true, apply only to requested instance
* if part of a collective resource
*
* \return Standard Pacemaker return code (exits on certain failures)
*/
int
cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
const pe_node_t *node, const char *move_lifetime,
int timeout_ms, cib_t *cib, int cib_options,
gboolean promoted_role_only, gboolean force)
{
int rc = pcmk_rc_ok;
int lpc = 0;
int before = 0;
int step_timeout_s = 0;
int sleep_interval = 2;
int timeout = timeout_ms / 1000;
bool stop_via_ban = false;
char *rsc_id = NULL;
char *lookup_id = NULL;
char *orig_target_role = NULL;
GList *list_delta = NULL;
GList *target_active = NULL;
GList *current_active = NULL;
GList *restart_target_active = NULL;
pe_working_set_t *data_set = NULL;
pe_resource_t *parent = uber_parent(rsc);
bool running = false;
const char *id = rsc->clone_name ? rsc->clone_name : rsc->id;
const char *host = node ? node->details->uname : NULL;
/* If the implicit resource or primitive resource of a bundle is given, operate on the
* bundle itself instead.
*/
if (pe_rsc_is_bundled(rsc)) {
rsc = parent->parent;
}
running = resource_is_running_on(rsc, host);
if (pe_rsc_is_clone(parent) && !running) {
if (pe_rsc_is_unique_clone(parent)) {
lookup_id = strdup(rsc->id);
} else {
lookup_id = clone_strip(rsc->id);
}
rsc = parent->fns->find_rsc(parent, lookup_id, node, pe_find_any|pe_find_current);
free(lookup_id);
running = resource_is_running_on(rsc, host);
}
if (!running) {
if (host) {
out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
} else {
out->err(out, "%s is not running anywhere and so cannot be restarted", id);
}
return ENXIO;
}
rsc_id = strdup(rsc->id);
if (pe_rsc_is_unique_clone(parent)) {
lookup_id = strdup(rsc->id);
} else {
lookup_id = clone_strip(rsc->id);
}
if (host) {
if (pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) {
stop_via_ban = true;
} else if (pe_rsc_is_clone(parent)) {
stop_via_ban = true;
free(lookup_id);
lookup_id = strdup(parent->id);
}
}
/*
grab full cib
determine originally active resources
disable or ban
poll cib and watch for affected resources to get stopped
without --timeout, calculate the stop timeout for each step and wait for that
if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
if everything stopped, re-enable or un-ban
poll cib and watch for affected resources to get started
without --timeout, calculate the start timeout for each step and wait for that
if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
report success
Optimizations:
- use constraints to determine ordered list of affected resources
- Allow a --no-deps option (aka. --force-restart)
*/
data_set = pe_new_working_set();
if (data_set == NULL) {
crm_perror(LOG_ERR, "Could not allocate working set");
rc = ENOMEM;
goto done;
}
data_set->priv = out;
rc = update_dataset(cib, data_set, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc);
goto done;
}
restart_target_active = get_active_resources(host, data_set->resources);
current_active = get_active_resources(host, data_set->resources);
dump_list(current_active, "Origin");
if (stop_via_ban) {
/* Stop the clone or bundle instance by banning it from the host */
out->quiet = true;
rc = cli_resource_ban(out, lookup_id, host, move_lifetime, NULL, cib,
cib_options, promoted_role_only);
} else {
/* Stop the resource by setting target-role to Stopped.
* Remember any existing target-role so we can restore it later
* (though it only makes any difference if it's Unpromoted).
*/
find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE,
PCMK_ACTION_STOPPED, FALSE, cib,
cib_options, force);
}
if(rc != pcmk_rc_ok) {
out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
if (current_active != NULL) {
g_list_free_full(current_active, free);
current_active = NULL;
}
if (restart_target_active != NULL) {
g_list_free_full(restart_target_active, free);
restart_target_active = NULL;
}
goto done;
}
rc = update_dataset(cib, data_set, true);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources would be stopped");
goto failure;
}
target_active = get_active_resources(host, data_set->resources);
dump_list(target_active, "Target");
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (list_delta != NULL) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
- step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
+ step_timeout_s = wait_time_estimate(data_set, list_delta)
+ / sleep_interval;
}
/* We probably don't need the entire step timeout */
for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%ds remaining", timeout);
}
            rc = update_dataset(cib, data_set, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were stopped");
goto failure;
}
if (current_active != NULL) {
g_list_free_full(current_active, free);
current_active = NULL;
}
current_active = get_active_resources(host, data_set->resources);
g_list_free(list_delta);
list_delta = NULL;
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
if(before == g_list_length(list_delta)) {
/* aborted during stop phase, print the contents of list_delta */
out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
if (stop_via_ban) {
rc = cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
} else if (orig_target_role) {
rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE,
orig_target_role, FALSE, cib,
cib_options, force);
free(orig_target_role);
orig_target_role = NULL;
} else {
rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
cib_options, force);
}
if(rc != pcmk_rc_ok) {
out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
goto done;
}
if (target_active != NULL) {
g_list_free_full(target_active, free);
target_active = NULL;
}
target_active = restart_target_active;
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
step_timeout_s = timeout / sleep_interval;
while (waiting_for_starts(list_delta, rsc, host)) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
- step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
+ step_timeout_s = wait_time_estimate(data_set, list_delta)
+ / sleep_interval;
}
/* We probably don't need the entire step timeout */
for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
sleep(sleep_interval);
if(timeout) {
timeout -= sleep_interval;
crm_trace("%ds remaining", timeout);
}
rc = update_dataset(cib, data_set, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were started");
goto failure;
}
if (current_active != NULL) {
g_list_free_full(current_active, free);
current_active = NULL;
}
/* It's OK if dependent resources moved to a different node,
* so we check active resources on all nodes.
*/
current_active = get_active_resources(NULL, data_set->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
if(before == g_list_length(list_delta)) {
/* aborted during start phase, print the contents of list_delta */
out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
display_list(out, list_delta, " * ");
rc = ETIME;
goto failure;
}
}
rc = pcmk_rc_ok;
goto done;
failure:
if (stop_via_ban) {
cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
} else if (orig_target_role) {
cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
XML_RSC_ATTR_TARGET_ROLE, orig_target_role,
FALSE, cib, cib_options, force);
free(orig_target_role);
} else {
cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
cib_options, force);
}
done:
if (list_delta != NULL) {
g_list_free(list_delta);
}
if (current_active != NULL) {
g_list_free_full(current_active, free);
}
if (target_active != NULL && (target_active != restart_target_active)) {
g_list_free_full(target_active, free);
}
if (restart_target_active != NULL) {
g_list_free_full(restart_target_active, free);
}
free(rsc_id);
free(lookup_id);
pe_free_working_set(data_set);
return rc;
}
static inline bool
action_is_pending(const pe_action_t *action)
{
if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo)
|| !pcmk_is_set(action->flags, pe_action_runnable)
|| pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
return false;
}
return true;
}
/*!
* \internal
* \brief Check whether any actions in a list are pending
*
* \param[in] actions List of actions to check
*
* \return true if any actions in the list are pending, otherwise false
*/
static bool
actions_are_pending(const GList *actions)
{
for (const GList *action = actions; action != NULL; action = action->next) {
const pe_action_t *a = (const pe_action_t *) action->data;
if (action_is_pending(a)) {
crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
return true;
}
}
return false;
}
static void
print_pending_actions(pcmk__output_t *out, GList *actions)
{
GList *action;
out->info(out, "Pending actions:");
for (action = actions; action != NULL; action = action->next) {
pe_action_t *a = (pe_action_t *) action->data;
if (!action_is_pending(a)) {
continue;
}
if (a->node) {
out->info(out, "\tAction %d: %s\ton %s",
a->id, a->uuid, pe__node_name(a->node));
} else {
out->info(out, "\tAction %d: %s", a->id, a->uuid);
}
}
}
/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)
/*!
* \internal
* \brief Wait until all pending cluster actions are complete
*
* This waits until either the CIB's transition graph is idle or a timeout is
* reached.
*
* \param[in,out] out Output object
* \param[in] timeout_ms Consider failed if actions do not complete in
* this time (specified in milliseconds, but
* one-second granularity is actually used; if 0, a
* default will be used)
* \param[in,out] cib Connection to the CIB manager
*
* \return Standard Pacemaker return code
*/
int
wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
{
pe_working_set_t *data_set = NULL;
int rc = pcmk_rc_ok;
int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
time_t expire_time = time(NULL) + timeout_s;
time_t time_diff;
bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
data_set = pe_new_working_set();
if (data_set == NULL) {
return ENOMEM;
}
do {
/* Abort if timeout is reached */
time_diff = expire_time - time(NULL);
if (time_diff > 0) {
crm_info("Waiting up to %lld seconds for cluster actions to complete", (long long) time_diff);
} else {
print_pending_actions(out, data_set->actions);
pe_free_working_set(data_set);
return ETIME;
}
if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
sleep(WAIT_SLEEP_S);
}
/* Get latest transition graph */
pe_reset_working_set(data_set);
rc = update_working_set_from_cib(out, data_set, cib);
if (rc != pcmk_rc_ok) {
pe_free_working_set(data_set);
return rc;
}
pcmk__schedule_actions(data_set->input,
pe_flag_no_counts|pe_flag_no_compat, data_set);
if (!printed_version_warning) {
/* If the DC has a different version than the local node, the two
* could come to different conclusions about what actions need to be
* done. Warn the user in this case.
*
* @TODO A possible long-term solution would be to reimplement the
* wait as a new controller operation that would be forwarded to the
* DC. However, that would have potential problems of its own.
*/
const char *dc_version = g_hash_table_lookup(data_set->config_hash,
"dc-version");
if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
out->info(out, "warning: wait option may not work properly in "
"mixed-version cluster");
printed_version_warning = true;
}
}
} while (actions_are_pending(data_set->actions));
pe_free_working_set(data_set);
return rc;
}
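/* Hypothetical caller sketch (CIB connection setup elided): with a signed-on
 * cib_t *cib and a pcmk__output_t *out, a bounded 30-second wait would be
 *
 *     int rc = wait_till_stable(out, 30000, cib);
 *
 * while timeout_ms == 0 falls back to WAIT_DEFAULT_TIMEOUT_S (one hour).
 */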
static const char *
get_action(const char *rsc_action) {
const char *action = NULL;
if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
action = PCMK_ACTION_VALIDATE_ALL;
} else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
action = PCMK_ACTION_MONITOR;
} else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
"force-demote", "force-promote", NULL)) {
action = rsc_action+6;
} else {
action = rsc_action;
}
return action;
}
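/* Examples of the mapping: "validate" -> PCMK_ACTION_VALIDATE_ALL,
 * "force-check" -> PCMK_ACTION_MONITOR, and "force-start"/"force-stop"/
 * "force-demote"/"force-promote" -> the verb after the "force-" prefix
 * (rsc_action + 6); anything else passes through unchanged.
 */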
/*!
* \brief Set up environment variables as expected by resource agents
*
* When the cluster executes resource agents, it adds certain environment
* variables (directly or via resource meta-attributes) expected by some
* resource agents. Add the essential ones that many resource agents expect, so
* the behavior is the same for command-line execution.
*
* \param[in,out] params Resource parameters that will be passed to agent
* \param[in] timeout_ms Action timeout (in milliseconds)
* \param[in] check_level OCF check level
* \param[in] verbosity Verbosity level
*/
static void
set_agent_environment(GHashTable *params, int timeout_ms, int check_level,
int verbosity)
{
g_hash_table_insert(params, strdup("CRM_meta_timeout"),
crm_strdup_printf("%d", timeout_ms));
g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION),
strdup(CRM_FEATURE_SET));
if (check_level >= 0) {
char *level = crm_strdup_printf("%d", check_level);
setenv("OCF_CHECK_LEVEL", level, 1);
free(level);
}
setenv("HA_debug", (verbosity > 0)? "1" : "0", 1);
if (verbosity > 1) {
setenv("OCF_TRACE_RA", "1", 1);
}
/* A resource agent using the standard ocf-shellfuncs library will not print
* messages to stderr if it doesn't have a controlling terminal (e.g. if
* crm_resource is called via script or ssh). This forces it to do so.
*/
setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
}
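/* Example outcome (hypothetical arguments): timeout_ms=20000, check_level=10,
 * verbosity=2 adds CRM_meta_timeout=20000 (plus the feature-set version) to
 * params, and exports OCF_CHECK_LEVEL=10, HA_debug=1 and OCF_TRACE_RA=1;
 * OCF_TRACE_FILE is set to /dev/stderr only if not already in the environment.
 */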
/*!
* \internal
* \brief Apply command-line overrides to resource parameters
*
* \param[in,out] params Parameters to be passed to agent
* \param[in] overrides Parameters to override (or NULL if none)
*/
static void
apply_overrides(GHashTable *params, GHashTable *overrides)
{
if (overrides != NULL) {
GHashTableIter iter;
char *name = NULL;
char *value = NULL;
g_hash_table_iter_init(&iter, overrides);
while (g_hash_table_iter_next(&iter, (gpointer *) &name,
(gpointer *) &value)) {
g_hash_table_replace(params, strdup(name), strdup(value));
}
}
}
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
const char *rsc_class, const char *rsc_prov,
const char *rsc_type, const char *rsc_action,
GHashTable *params, GHashTable *override_hash,
int timeout_ms, int resource_verbose, gboolean force,
int check_level)
{
const char *class = rsc_class;
const char *action = get_action(rsc_action);
crm_exit_t exit_code = CRM_EX_OK;
svc_action_t *op = NULL;
// If no timeout was provided, use the same default as the cluster
if (timeout_ms == 0) {
timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
set_agent_environment(params, timeout_ms, check_level, resource_verbose);
apply_overrides(params, override_hash);
op = services__create_resource_action(rsc_name? rsc_name : "test",
rsc_class, rsc_prov, rsc_type, action,
0, timeout_ms, params, 0);
if (op == NULL) {
out->err(out, "Could not execute %s using %s%s%s:%s: %s",
action, rsc_class, (rsc_prov? ":" : ""),
(rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
g_hash_table_destroy(params);
return CRM_EX_OSERR;
}
if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
class = resources_find_service_class(rsc_type);
}
if (!pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_OCF,
PCMK_RESOURCE_CLASS_LSB, NULL)) {
services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
"Manual execution of the %s standard is "
"unsupported", pcmk__s(class, "unspecified"));
}
if (op->rc != PCMK_OCF_UNKNOWN) {
exit_code = op->rc;
goto done;
}
services_action_sync(op);
// Map results to OCF codes for consistent reporting to user
{
enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);
// Cast variable instead of function return to keep compilers happy
exit_code = (crm_exit_t) ocf_code;
}
done:
out->message(out, "resource-agent-action", resource_verbose, rsc_class,
rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
exit_code, op->status, services__exit_reason(op),
op->stdout_data, op->stderr_data);
services_action_free(op);
return exit_code;
}
crm_exit_t
cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
const char *rsc_action, GHashTable *override_hash,
int timeout_ms, cib_t * cib, pe_working_set_t *data_set,
int resource_verbose, gboolean force, int check_level)
{
pcmk__output_t *out = data_set->priv;
crm_exit_t exit_code = CRM_EX_OK;
const char *rid = NULL;
const char *rtype = NULL;
const char *rprov = NULL;
const char *rclass = NULL;
GHashTable *params = NULL;
if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
"force-promote", NULL)) {
if(pe_rsc_is_clone(rsc)) {
GList *nodes = cli_resource_search(rsc, requested_name, data_set);
if(nodes != NULL && force == FALSE) {
out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
rsc_action, rsc->id);
out->err(out, "Try setting target-role=Stopped first or specifying "
"the force option");
return CRM_EX_UNSAFE;
}
g_list_free_full(nodes, free);
}
}
if(pe_rsc_is_clone(rsc)) {
/* Grab the first child resource in the hope it's not a group */
rsc = rsc->children->data;
}
if(rsc->variant == pe_group) {
out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
} else if (rsc->variant == pe_container || pe_rsc_is_bundled(rsc)) {
out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
}
rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
params = generate_resource_params(rsc, NULL /* @TODO use local node */,
data_set);
if (timeout_ms == 0) {
timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action), data_set);
}
rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action,
params, override_hash, timeout_ms,
resource_verbose, force, check_level);
return exit_code;
}
// \return Standard Pacemaker return code
int
cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
const char *host_name, const char *move_lifetime, cib_t *cib,
int cib_options, pe_working_set_t *data_set,
gboolean promoted_role_only, gboolean force)
{
pcmk__output_t *out = data_set->priv;
int rc = pcmk_rc_ok;
unsigned int count = 0;
pe_node_t *current = NULL;
pe_node_t *dest = pe_find_node(data_set->nodes, host_name);
bool cur_is_dest = false;
if (dest == NULL) {
return pcmk_rc_node_unknown;
}
if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
const pe_resource_t *p = pe__const_top_resource(rsc, false);
if (pcmk_is_set(p->flags, pe_rsc_promotable)) {
out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
rsc_id = p->id;
rsc = p;
} else {
out->info(out, "Ignoring --promoted option: %s is not promotable",
rsc_id);
promoted_role_only = FALSE;
}
}
current = pe__find_active_requires(rsc, &count);
if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
unsigned int promoted_count = 0;
pe_node_t *promoted_node = NULL;
for (const GList *iter = rsc->children; iter; iter = iter->next) {
const pe_resource_t *child = (const pe_resource_t *) iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
if (child_role == pcmk_role_promoted) {
rsc = child;
promoted_node = pe__current_node(child);
promoted_count++;
}
}
if (promoted_role_only || (promoted_count != 0)) {
count = promoted_count;
current = promoted_node;
}
}
if (count > 1) {
if (pe_rsc_is_clone(rsc)) {
current = NULL;
} else {
return pcmk_rc_multiple;
}
}
if (current && (current->details == dest->details)) {
cur_is_dest = true;
if (force) {
crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
rsc_id, promoted_role_only?"promoted":"active",
pe__node_name(dest));
} else {
return pcmk_rc_already;
}
}
/* Clear any previous prefer constraints across all nodes. */
cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, false, force);
/* Clear any previous ban constraints on 'dest'. */
cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib,
cib_options, TRUE, force);
/* Record an explicit preference for 'dest' */
rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
cib, cib_options, promoted_role_only);
crm_trace("%s%s now prefers %s%s",
rsc->id, (promoted_role_only? " (promoted)" : ""),
pe__node_name(dest), force?"(forced)":"");
/* only ban the previous location if current location != destination location.
* it is possible to use -M to enforce a location without regard of where the
* resource is currently located */
if (force && !cur_is_dest) {
/* Ban the original location if possible */
if(current) {
(void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
NULL, cib, cib_options, promoted_role_only);
} else if(count > 1) {
out->info(out, "Resource '%s' is currently %s in %d locations. "
"One may now move to %s",
rsc_id, (promoted_role_only? "promoted" : "active"),
count, pe__node_name(dest));
out->info(out, "To prevent '%s' from being %s at a specific location, "
"specify a node.",
rsc_id, (promoted_role_only? "promoted" : "active"));
} else {
crm_trace("Not banning %s from its current location: not active", rsc_id);
}
}
return rc;
}
diff --git a/xml/README.md b/xml/README.md
index e32edc2fc2..a3a1973dfa 100644
--- a/xml/README.md
+++ b/xml/README.md
@@ -1,136 +1,135 @@
# Schema Reference
Pacemaker's XML schema has a version of its own, independent of the version of
Pacemaker itself.
## Versioned Schema Evolution
A versioned schema offers transparent backward and forward compatibility.
- It reflects the timeline of schema-backed features (introduction,
changes to the syntax, possibly deprecation) through the versioned
stable schema increments, while keeping schema versions used by default
by older Pacemaker versions untouched.
- Pacemaker internally uses the latest stable schema version, and relies on
supplemental transformations to promote cluster configurations based on
older, incompatible schema versions into the desired form.
## Mapping Pacemaker Versions to Schema Versions
| Pacemaker | Latest Schema | Changed
| --------- | ------------- | ----------------------------------------------
| `2.1.5` | `3.9` | `alerts`, `constraints`, `nodes`, `nvset`,
| | | `options`, `resources`, `rule`
| `2.1.3` | `3.8` | `acls`
| `2.1.0` | `3.7` | `constraints`, `resources`
| `2.0.5` | `3.5` | `api`, `resources`, `rule`
| `2.0.4` | `3.3` | `tags`
| `2.0.1` | `3.2` | `resources`
| `2.0.0` | `3.1` | `constraints`, `resources`
| `1.1.18` | `2.10` | `resources`, `alerts`
| `1.1.17` | `2.9` | `resources`, `rule`
| `1.1.16` | `2.6` | `constraints`
| `1.1.15` | `2.5` | `alerts`
| `1.1.14` | `2.4` | `fencing`
| `1.1.13` | `2.3` | `constraints`
| `1.1.12` | `2.0` | `nodes`, `nvset`, `resources`, `tags`, `acls`
| `1.1.8` | `1.2` |
## Schema generation
Each logical portion of the schema goes into its own RNG file, named like
`${base}-${X}.${Y}.rng`. `${base}` identifies the portion of the schema
(e.g. constraints, resources); `${X}.${Y}` is the latest schema version that
contained changes in this portion of the schema.
The complete, overall schema, `pacemaker-${X}.${Y}.rng`, is automatically
generated from the other files via the Makefile.
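For example, per the mapping table above, `resources` last changed in `3.9`
while `fencing` last changed in `2.4`, so the generated `pacemaker-3.9.rng`
pulls in `resources-3.9.rng` together with the older `fencing-2.4.rng`.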
# Updating schema files #
## New features ##
The current schema version is determined at runtime when
crm\_schema\_init() scans the CRM\_SCHEMA\_DIRECTORY.
It will have the form `pacemaker-${X}.${Y}` and the highest
`${X}.${Y}` wins.
### Simple Additions
When the new syntax is a simple addition to the previous one, create a
new entry, incrementing `${Y}`.
### Feature Removal or otherwise Incompatible Changes
When the new syntax is not a simple addition to the previous one,
create a new entry, incrementing `${X}` and setting `${Y} = 0`.
An XSLT file is also required that converts an old syntax to the new
one and must be named `upgrade-${Xold}.${Yold}.xsl`.
See `xml/upgrade-1.3.xsl` for an example.
Since `xml/upgrade-2.10.xsl`, a rather self-descriptive approach is taken:
the metadata describing the replacements and other modifications to
perform is kept separate from the actual executive parts, which is
leveraged, e.g., by the on-the-fly overview obtained with
`./regression.sh -X test2to3`.
Also, this was the first time particular key names of `nvpair`s
(i.e. below the granularity of the schemas so far) received attention,
and consequently, names that are no longer expected became systematically
banned in the after-upgrade schemas, using an `<except>` construct in the
data type specification pertaining to the affected XML path.
The implied complexity also resulted in a new compound, stepwise
transformation, which lifts some of the procedural burden from the
core upgrade recipe. In particular, the `id-ref` based syntactic
simplification allowed in the CIB format introduces nonnegligible
internal "noise", because such a scheme adds an extra indirection and
is generally non-bijective (its interpretation is context-dependent).
To reduce this strain, a symmetric arrangement is introduced as a pair
of _enter_/_leave_ (pre-upgrade/post-upgrade) transformations, where
the latter reversibly restores what the former intentionally
simplified (normalized) for the upgrade transformation's use. The pair
is optional (the post-upgrade counterpart may even be omitted on its
own), and whether it runs depends on whether the suitable files are
found alongside the upgrade transformation itself: e.g., for
`upgrade-2.10.xsl`, such files are `upgrade-2.10-enter.xsl` and
`upgrade-2.10-leave.xsl`. Note that unfolding and refolding `id-ref`
shortcuts is just one practically imposed case of how to reversibly
make the configuration space tractable during the upgrade, allowing
for more sophistication down the road.
### General Procedure
1. Copy the most recent version of `${base}-*.rng` to `${base}-${X}.${Y}.rng`,
such that the new file name increments the highest number of any schema file,
not just the file being edited.
2. Commit the copy, e.g. `"Low: xml: clone ${base} schema in preparation for
changes"`. This way, the actual change will be obvious in the commit history.
3. Modify `${base}-${X}.${Y}.rng` as required.
4. If required, add an XSLT file, and update `xslt_SCRIPTS` in `xml/Makefile.am`.
5. Commit.
-6. Run `make -C xml clean; make -C xml` to rebuild the schemas in the local
6. Run `make -C xml clean; make -C xml` to rebuild the schemas in the local
source directory.
7. The CIB validity and upgrade regression tests will break after the schema is
updated. Run `cts/cts-cli -s` to make the expected outputs reflect the
changes made so far, and run `git diff` to ensure that these changes look
sane. Finally, commit the changes.
8. Similarly, with the new major version `${X}`, it's advisable to refresh
scheduler tests at some point. See the instructions in `cts/README.md`.
## Using a New Schema
New features will not be available until the cluster administrator:
1. Updates all the nodes
2. Runs the equivalent of `cibadmin --upgrade --force`
## Random Notes
From the source directory, run `make -C xml diff` to see the changes
in the current schema (compared to the previous ones).
Alternatively, if the intention is to grok the overall historical schema
evolution, use `make -C xml fulldiff`.