
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index 3fe2f97e2a..ef288b5c41 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1478 +1,1478 @@
/*
* Copyright 2013-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include <crm/lrmd.h>
#include <crm/lrmd_internal.h>
#include <crm/services.h>
#include <pacemaker-controld.h>
#define REMOTE_LRMD_RA "remote"
/* The max start timeout before cmd retry */
#define MAX_START_TIMEOUT_MS 10000
#define cmd_set_flags(cmd, flags_to_set) do { \
(cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Remote command", (cmd)->rsc_id, (cmd)->status, \
(flags_to_set), #flags_to_set); \
} while (0)
#define cmd_clear_flags(cmd, flags_to_clear) do { \
(cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Remote command", (cmd)->rsc_id, (cmd)->status, \
(flags_to_clear), #flags_to_clear); \
} while (0)
enum remote_cmd_status {
cmd_reported_success = (1 << 0),
cmd_cancel = (1 << 1),
};
typedef struct remote_ra_cmd_s {
/*! the local node the cmd is issued from */
char *owner;
/*! the remote node the cmd is executed on */
char *rsc_id;
/*! the action to execute */
char *action;
/*! some string the client wants us to give it back */
char *userdata;
/*! start delay in ms */
int start_delay;
/*! timer id used for start delay. */
int delay_id;
/*! timeout in ms for cmd */
int timeout;
int remaining_timeout;
/*! recurring interval in ms */
guint interval_ms;
/*! interval timer id */
int interval_id;
int monitor_timeout_id;
int takeover_timeout_id;
/*! action parameters */
lrmd_key_value_t *params;
pcmk__action_result_t result;
int call_id;
time_t start_time;
uint32_t status;
} remote_ra_cmd_t;
#define lrm_remote_set_flags(lrm_state, flags_to_set) do { \
lrm_state_t *lrm = (lrm_state); \
remote_ra_data_t *ra = lrm->remote_ra_data; \
ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
lrm->node_name, ra->status, \
(flags_to_set), #flags_to_set); \
} while (0)
#define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \
lrm_state_t *lrm = (lrm_state); \
remote_ra_data_t *ra = lrm->remote_ra_data; \
ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
lrm->node_name, ra->status, \
(flags_to_clear), #flags_to_clear); \
} while (0)
enum remote_status {
expect_takeover = (1 << 0),
takeover_complete = (1 << 1),
remote_active = (1 << 2),
/* Maintenance mode is difficult to determine from the controller's context,
* so we have it signalled back with the transition from the scheduler.
*/
remote_in_maint = (1 << 3),
/* Similarly, whether we are controlling a guest node or a remote node is
* hard to determine here. Fortunately, the transition already carries a
* meta-attribute for it, and since the situation doesn't change over time,
* we can record it at resource start for later use, when the attributes
* aren't at hand.
*/
controlling_guest = (1 << 4),
};
typedef struct remote_ra_data_s {
crm_trigger_t *work;
remote_ra_cmd_t *cur_cmd;
GList *cmds;
GList *recurring_cmds;
uint32_t status;
} remote_ra_data_t;
static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
static GList *fail_all_monitor_cmds(GList * list);
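/*!
* \internal
* \brief Free a remote RA command, cancelling any timers it still holds
*
* \param[in,out] user_data Command to free (remote_ra_cmd_t *)
*/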
static void
free_cmd(gpointer user_data)
{
remote_ra_cmd_t *cmd = user_data;
if (!cmd) {
return;
}
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
}
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
}
if (cmd->takeover_timeout_id) {
g_source_remove(cmd->takeover_timeout_id);
}
free(cmd->owner);
free(cmd->rsc_id);
free(cmd->action);
free(cmd->userdata);
pcmk__reset_result(&(cmd->result));
lrmd_key_value_freeall(cmd->params);
free(cmd);
}
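/*!
* \internal
* \brief Generate a call ID for a remote RA command
*
* \return Next available call ID (wraps back to 1 on overflow)
*/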
static int
generate_callid(void)
{
static int remote_ra_callid = 0;
remote_ra_callid++;
if (remote_ra_callid <= 0) {
remote_ra_callid = 1;
}
return remote_ra_callid;
}
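/*!
* \internal
* \brief Move a recurring command to the execution queue when its interval expires
*
* \param[in,out] data Recurring command (remote_ra_cmd_t *) whose interval timer fired
*
* \return FALSE (so the timer is not rescheduled)
*/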
static gboolean
recurring_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->interval_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
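/*!
* \internal
* \brief Trigger command execution once a command's start delay has expired
*
* \param[in,out] data Command (remote_ra_cmd_t *) whose start delay timer fired
*
* \return FALSE (so the timer is not rescheduled)
*/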
static gboolean
start_delay_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->delay_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
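/*!
* \internal
* \brief Check whether a remote node's transient attributes should be purged
*
* \param[in] node Remote node to check
*
* \return false if the connection resource has been running since (roughly)
* before its former host was lost, otherwise true
*/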
static bool
should_purge_attributes(crm_node_t *node)
{
bool purge = true;
crm_node_t *conn_node = NULL;
lrm_state_t *connection_rsc = NULL;
if (!node->conn_host) {
return purge;
}
/* Get the node that was hosting the remote connection resource from the
* peer cache. That's the one we really care about here.
*/
conn_node = pcmk__get_node(0, node->conn_host, NULL,
pcmk__node_search_cluster);
if (conn_node == NULL) {
return purge;
}
/* Check the uptime of connection_rsc. If it hasn't been running long
* enough, set purge=true. "Long enough" means it started running earlier
* than the timestamp when we noticed it went away in the first place.
*/
connection_rsc = lrm_state_find(node->uname);
if (connection_rsc != NULL) {
lrmd_t *lrm = connection_rsc->conn;
time_t uptime = lrmd__uptime(lrm);
time_t now = time(NULL);
/* Add 20s of fuzziness to give corosync a while to notice the remote
* host is gone. On various error conditions (failure to get uptime,
* peer_lost isn't set) we default to purging.
*/
if (uptime > 0 &&
conn_node->peer_lost > 0 &&
uptime + 20 >= now - conn_node->peer_lost) {
purge = false;
}
}
return purge;
}
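/*!
* \internal
* \brief Select which node state section(s) to delete based on a purge decision
*
* \param[in] purge Whether transient attributes should be deleted as well
*
* \return Node state section appropriate to \p purge and the shutdown-lock setting
*/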
static enum controld_section_e
section_to_delete(bool purge)
{
if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
if (purge) {
return controld_section_all_unlocked;
} else {
return controld_section_lrm_unlocked;
}
} else {
if (purge) {
return controld_section_all;
} else {
return controld_section_lrm;
}
}
}
static void
purge_remote_node_attrs(int call_opt, crm_node_t *node)
{
bool purge = should_purge_attributes(node);
enum controld_section_e section = section_to_delete(purge);
/* Purge node from attrd's memory */
if (purge) {
update_attrd_remote_node_removed(node->uname, NULL);
}
controld_delete_node_state(node->uname, section, call_opt);
}
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node joining
*
* \param[in] node_name Name of newly integrated pacemaker_remote node
*/
static void
remote_node_up(const char *node_name)
{
int call_opt;
xmlNode *update, *state;
crm_node_t *node;
lrm_state_t *connection_rsc = NULL;
CRM_CHECK(node_name != NULL, return);
crm_info("Announcing Pacemaker Remote node %s", node_name);
call_opt = crmd_cib_smart_opt();
/* Delete node's probe_complete attribute. This serves two purposes:
*
* - @COMPAT DCs < 1.1.14 in a rolling upgrade might use it
* - deleting it (or any attribute for that matter) here ensures the
* attribute manager learns the node is remote
*/
update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
/* Ensure node is in the remote peer cache with member status */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
purge_remote_node_attrs(call_opt, node);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
/* Apply any start state that we were given from the environment on the
* remote node.
*/
connection_rsc = lrm_state_find(node->uname);
if (connection_rsc != NULL) {
lrmd_t *lrm = connection_rsc->conn;
const char *start_state = lrmd__node_start_state(lrm);
if (start_state) {
set_join_state(start_state, node->uname, node->uuid, true);
}
}
/* pacemaker_remote nodes don't participate in the membership layer,
* so cluster nodes don't automatically get notified when they come and go.
* We send a cluster message to the DC, and update the CIB node state entry,
* so the DC will get it sooner (via message) or later (via CIB refresh),
* and any other interested parties can query the CIB.
*/
broadcast_remote_state_message(node_name, true);
update = create_xml_node(NULL, PCMK_XE_STATUS);
state = create_node_state_update(node, node_update_cluster, update,
__func__);
/* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever
* needs to be fenced, this flag will allow various actions to determine
* whether the fencing has happened yet.
*/
crm_xml_add(state, XML_NODE_IS_FENCED, "0");
/* TODO: If the remote connection drops, and this (async) CIB update either
* failed or has not yet completed, later actions could mistakenly think the
* node has already been fenced (if the XML_NODE_IS_FENCED attribute was
* previously set, because it won't have been cleared). This could prevent
* actual fencing or allow recurring monitor failures to be cleared too
* soon. Ideally, we wouldn't rely on the CIB for the fenced status.
*/
controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL);
free_xml(update);
}
enum down_opts {
DOWN_KEEP_LRM,
DOWN_ERASE_LRM
};
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node leaving
*
* \param[in] node_name Name of lost node
* \param[in] opts Whether to keep or erase LRM history
*/
static void
remote_node_down(const char *node_name, const enum down_opts opts)
{
xmlNode *update;
int call_opt = crmd_cib_smart_opt();
crm_node_t *node;
/* Purge node from attrd's memory */
update_attrd_remote_node_removed(node_name, NULL);
/* Normally, only node attributes should be erased, and the resource history
* should be kept until the node comes back up. However, after a successful
* fence, we want to clear the history as well, so we don't think resources
* are still running on the node.
*/
if (opts == DOWN_ERASE_LRM) {
controld_delete_node_state(node_name, controld_section_all, call_opt);
} else {
controld_delete_node_state(node_name, controld_section_attrs, call_opt);
}
/* Ensure node is in the remote peer cache with lost state */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
/* Notify DC */
broadcast_remote_state_message(node_name, false);
/* Update CIB node state */
update = create_xml_node(NULL, PCMK_XE_STATUS);
create_node_state_update(node, node_update_cluster, update, __func__);
controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL);
free_xml(update);
}
/*!
* \internal
* \brief Handle effects of a remote RA command on node state
*
* \param[in] cmd Completed remote RA command
*/
static void
check_remote_node_state(const remote_ra_cmd_t *cmd)
{
/* Only successful actions can change node state */
if (!pcmk__result_ok(&(cmd->result))) {
return;
}
if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) {
remote_node_up(cmd->rsc_id);
} else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_casei)) {
/* After a successful migration, we don't need to do remote_node_up()
* because the DC already knows the node is up, and we don't want to
* clear LRM history etc. We do need to add the remote node to this
* host's remote peer cache, because (unless it happens to be DC)
* it hasn't been tracking the remote node, and other code relies on
* the cache to distinguish remote nodes from unseen cluster nodes.
*/
crm_node_t *node = crm_remote_peer_get(cmd->rsc_id);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
} else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
if (ra_data) {
if (!pcmk_is_set(ra_data->status, takeover_complete)) {
/* Stop means down if we didn't successfully migrate elsewhere */
remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
} else if (AM_I_DC == FALSE) {
/* Only the connection host and DC track node state,
* so if the connection migrated elsewhere and we aren't DC,
* un-cache the node, so we don't have stale info
*/
crm_remote_peer_cache_remove(cmd->rsc_id);
}
}
}
/* We don't do anything for successful monitors, which is correct for
* routine recurring monitors, and for monitors on nodes where the
* connection isn't supposed to be (the cluster will stop the connection in
* that case). However, if the initial probe finds the connection already
* active on the node where we want it, we probably should do
* remote_node_up(). Unfortunately, we can't distinguish that case here.
* Given that connections have to be initiated by the cluster, the chance of
* that should be close to zero.
*/
}
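/*!
* \internal
* \brief Report a remote RA command's result through the executor callback path
*
* \param[in,out] cmd Completed command whose result should be reported
*/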
static void
report_remote_ra_result(remote_ra_cmd_t * cmd)
{
lrmd_event_data_t op = { 0, };
check_remote_node_state(cmd);
op.type = lrmd_event_exec_complete;
op.rsc_id = cmd->rsc_id;
op.op_type = cmd->action;
op.user_data = cmd->userdata;
op.timeout = cmd->timeout;
op.interval_ms = cmd->interval_ms;
op.t_run = (unsigned int) cmd->start_time;
op.t_rcchange = (unsigned int) cmd->start_time;
lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status,
cmd->result.exit_reason);
if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) {
op.t_rcchange = (unsigned int) time(NULL);
/* This edge case should almost never occur, but if it does, the result is
* that a failure will not be processed correctly. It is only remotely
* possible because a connection resource's TCP connection can be detected
* as failed at any moment after start completes; the actual recurring
* operation is just a connectivity ping.
*
* Basically, we are not guaranteed that the first successful monitor op and
* a subsequent failed monitor op will not share the same timestamp, yet we
* have to make it look like the operations occurred at separate times. */
if (op.t_rcchange == op.t_run) {
op.t_rcchange++;
}
}
if (cmd->params) {
lrmd_key_value_t *tmp;
op.params = pcmk__strkey_table(free, free);
for (tmp = cmd->params; tmp; tmp = tmp->next) {
g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value));
}
}
op.call_id = cmd->call_id;
op.remote_nodename = cmd->owner;
lrm_op_callback(&op);
if (op.params) {
g_hash_table_destroy(op.params);
}
lrmd__reset_result(&op);
}
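/*!
* \internal
* \brief Recalculate how much of a command's timeout remains (in milliseconds)
*
* \param[in,out] cmd Command to update
*/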
static void
update_remaining_timeout(remote_ra_cmd_t * cmd)
{
cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
}
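/*!
* \internal
* \brief Retry connecting for an in-flight start or migrate_from command
*
* \param[in,out] data Executor state (lrm_state_t *) for the connection
*
* \return FALSE (so the timer is not rescheduled)
*/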
static gboolean
retry_start_cmd_cb(gpointer data)
{
lrm_state_t *lrm_state = data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd = NULL;
int rc = ETIME;
if (!ra_data || !ra_data->cur_cmd) {
return FALSE;
}
cmd = ra_data->cur_cmd;
if (!pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
return FALSE;
}
update_remaining_timeout(cmd);
if (cmd->remaining_timeout > 0) {
rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
} else {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_TIMEOUT,
"Not enough time remains to retry remote connection");
}
if (rc != pcmk_rc_ok) {
report_remote_ra_result(cmd);
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
ra_data->cur_cmd = NULL;
free_cmd(cmd);
} else {
/* wait for connection event */
}
return FALSE;
}
static gboolean
connection_takeover_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
crm_info("takeover event timed out for node %s", cmd->rsc_id);
cmd->takeover_timeout_id = 0;
lrm_state = lrm_state_find(cmd->rsc_id);
handle_remote_ra_stop(lrm_state, cmd);
free_cmd(cmd);
return FALSE;
}
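/*!
* \internal
* \brief Handle a timeout waiting for a remote poke response
*
* \param[in,out] data Monitor command (remote_ra_cmd_t *) that timed out
*
* \return FALSE (so the timer is not rescheduled)
*/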
static gboolean
monitor_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
lrm_state = lrm_state_find(cmd->rsc_id);
crm_info("Timed out waiting for remote poke response from %s%s",
cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
cmd->monitor_timeout_id = 0;
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT,
"Remote executor did not respond");
if (lrm_state && lrm_state->remote_ra_data) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (ra_data->cur_cmd == cmd) {
ra_data->cur_cmd = NULL;
}
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
}
report_remote_ra_result(cmd);
free_cmd(cmd);
if(lrm_state) {
lrm_state_disconnect(lrm_state);
}
return FALSE;
}
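/*!
* \internal
* \brief Synthesize a successful executor event and process it
*
* \param[in,out] lrm_state Executor state to use (NULL to use the local node's)
* \param[in] rsc_id Resource ID for the synthesized event
* \param[in] op_type Action name for the synthesized event
*/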
static void
synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
{
lrmd_event_data_t op = { 0, };
if (lrm_state == NULL) {
/* if lrm_state not given assume local */
lrm_state = lrm_state_find(controld_globals.our_nodename);
}
CRM_ASSERT(lrm_state != NULL);
op.type = lrmd_event_exec_complete;
op.rsc_id = rsc_id;
op.op_type = op_type;
op.t_run = (unsigned int) time(NULL);
op.t_rcchange = op.t_run;
op.call_id = generate_callid();
lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
process_lrm_event(lrm_state, &op, NULL, NULL);
}
void
remote_lrm_op_callback(lrmd_event_data_t * op)
{
gboolean cmd_handled = FALSE;
lrm_state_t *lrm_state = NULL;
remote_ra_data_t *ra_data = NULL;
remote_ra_cmd_t *cmd = NULL;
crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
"(%d) status=%s (%d)",
(op->op_type? op->op_type : ""), (op->op_type? " " : ""),
lrmd_event_type2str(op->type), op->remote_nodename,
services_ocf_exitcode_str(op->rc), op->rc,
pcmk_exec_status_str(op->op_status), op->op_status);
lrm_state = lrm_state_find(op->remote_nodename);
if (!lrm_state || !lrm_state->remote_ra_data) {
crm_debug("No state information found for remote connection event");
return;
}
ra_data = lrm_state->remote_ra_data;
if (op->type == lrmd_event_new_client) {
// Another client has connected to the remote daemon
if (pcmk_is_set(ra_data->status, expect_takeover)) {
// Great, we knew this was coming
lrm_remote_clear_flags(lrm_state, expect_takeover);
lrm_remote_set_flags(lrm_state, takeover_complete);
} else {
crm_err("Disconnecting from Pacemaker Remote node %s due to "
"unexpected client takeover", op->remote_nodename);
/* In this case, lrmd_tls_connection_destroy() will be called under the
* control of mainloop. Do not free lrm_state->conn yet; it will be freed
* in the following stop action. */
lrm_state_disconnect_only(lrm_state);
}
return;
}
/* filter all EXEC events up */
if (op->type == lrmd_event_exec_complete) {
if (pcmk_is_set(ra_data->status, takeover_complete)) {
crm_debug("ignoring event, this connection is taken over by another node");
} else {
lrm_op_callback(op);
}
return;
}
if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
if (!pcmk_is_set(ra_data->status, remote_active)) {
crm_debug("Disconnection from Pacemaker Remote node %s complete",
lrm_state->node_name);
} else if (!remote_ra_is_in_maintenance(lrm_state)) {
crm_err("Lost connection to Pacemaker Remote node %s",
lrm_state->node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
} else {
crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
lrm_state->node_name);
/* Do roughly what a 'stop' on the remote-resource would do */
handle_remote_ra_stop(lrm_state, NULL);
remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
/* now fake the reply of a successful 'stop' */
synthesize_lrmd_success(NULL, lrm_state->node_name,
PCMK_ACTION_STOP);
}
return;
}
if (!ra_data->cur_cmd) {
crm_debug("no event to match");
return;
}
cmd = ra_data->cur_cmd;
/* Start actions and migrate_from actions complete after the connection
* comes back to us. */
if ((op->type == lrmd_event_connect)
&& pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
if (op->connection_rc < 0) {
update_remaining_timeout(cmd);
if ((op->connection_rc == -ENOKEY)
|| (op->connection_rc == -EKEYREJECTED)) {
// Hard error, don't retry
pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM,
PCMK_EXEC_ERROR,
pcmk_strerror(op->connection_rc));
} else if (cmd->remaining_timeout > 3000) {
crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
return;
} else {
crm_trace("can't reschedule start, remaining timeout too small %d",
cmd->remaining_timeout);
pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_TIMEOUT,
"%s without enough time to retry",
pcmk_strerror(op->connection_rc));
}
} else {
lrm_state_reset_tables(lrm_state, TRUE);
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
lrm_remote_set_flags(lrm_state, remote_active);
}
crm_debug("Remote connection event matched %s action", cmd->action);
report_remote_ra_result(cmd);
cmd_handled = TRUE;
} else if ((op->type == lrmd_event_poke)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
cmd->monitor_timeout_id = 0;
}
/* Only report success the first time; after that, only worry about failures.
* For this function, if we get the poke back, it is always a success. Pokes
* only fail if the send fails or the response times out. */
if (!pcmk_is_set(cmd->status, cmd_reported_success)) {
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
cmd_set_flags(cmd, cmd_reported_success);
}
crm_debug("Remote poke event matched %s action", cmd->action);
/* success, keep rescheduling if interval is present. */
if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) {
ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
cmd->interval_id = g_timeout_add(cmd->interval_ms,
recurring_helper, cmd);
cmd = NULL; /* prevent free */
}
cmd_handled = TRUE;
} else if ((op->type == lrmd_event_disconnect)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
if (pcmk_is_set(ra_data->status, remote_active) &&
!pcmk_is_set(cmd->status, cmd_cancel)) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR,
"Remote connection unexpectedly dropped "
"during monitor");
report_remote_ra_result(cmd);
crm_err("Remote connection to %s unexpectedly dropped during monitor",
lrm_state->node_name);
}
cmd_handled = TRUE;
} else if ((op->type == lrmd_event_new_client)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_STOP,
pcmk__str_casei)) {
handle_remote_ra_stop(lrm_state, cmd);
cmd_handled = TRUE;
} else {
crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
}
if (cmd_handled) {
ra_data->cur_cmd = NULL;
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
free_cmd(cmd);
}
}
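/*!
* \internal
* \brief Handle a stop action for a remote connection resource
*
* \param[in,out] lrm_state Executor state for the connection
* \param[in,out] cmd Stop command to report (may be NULL)
*/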
static void
handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
{
remote_ra_data_t *ra_data = NULL;
CRM_ASSERT(lrm_state);
ra_data = lrm_state->remote_ra_data;
if (!pcmk_is_set(ra_data->status, takeover_complete)) {
/* Delete pending ops whenever the remote connection is intentionally stopped */
g_hash_table_remove_all(lrm_state->active_ops);
} else {
/* We no longer hold the history if this connection has been migrated;
* however, we keep the metadata cache for future use */
lrm_state_reset_tables(lrm_state, FALSE);
}
lrm_remote_clear_flags(lrm_state, remote_active);
lrm_state_disconnect(lrm_state);
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
ra_data->cmds = NULL;
ra_data->recurring_cmds = NULL;
ra_data->cur_cmd = NULL;
if (cmd) {
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
}
}
// \return Standard Pacemaker return code
static int
handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
{
const char *server = NULL;
lrmd_key_value_t *tmp = NULL;
int port = 0;
int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
int rc = pcmk_rc_ok;
for (tmp = cmd->params; tmp; tmp = tmp->next) {
if (pcmk__strcase_any_of(tmp->key,
PCMK_REMOTE_RA_ADDR, PCMK_REMOTE_RA_SERVER,
NULL)) {
server = tmp->value;
} else if (pcmk__str_eq(tmp->key, PCMK_REMOTE_RA_PORT,
pcmk__str_none)) {
port = atoi(tmp->value);
} else if (pcmk__str_eq(tmp->key, CRM_META "_" PCMK__META_CONTAINER,
pcmk__str_none)) {
lrm_remote_set_flags(lrm_state, controlling_guest);
}
}
rc = controld_connect_remote_executor(lrm_state, server, port,
timeout_used);
if (rc != pcmk_rc_ok) {
pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR,
"Could not connect to Pacemaker Remote node %s: %s",
lrm_state->node_name, pcmk_rc_str(rc));
}
return rc;
}
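/*!
* \internal
* \brief Execute queued commands for a remote connection (mainloop trigger callback)
*
* \param[in,out] user_data Executor state (lrm_state_t *) for the connection
*
* \return Always TRUE
*/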
static gboolean
handle_remote_ra_exec(gpointer user_data)
{
int rc = 0;
lrm_state_t *lrm_state = user_data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd;
GList *first = NULL;
if (ra_data->cur_cmd) {
/* still waiting on previous cmd */
return TRUE;
}
while (ra_data->cmds) {
first = ra_data->cmds;
cmd = first->data;
if (cmd->delay_id) {
/* still waiting for start delay timer to trip */
return TRUE;
}
ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
g_list_free_1(first);
if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete);
if (handle_remote_ra_start(lrm_state, cmd,
cmd->timeout) == pcmk_rc_ok) {
/* take care of this later when we get async connection result */
crm_debug("Initiated async remote connection, %s action will complete after connect event",
cmd->action);
ra_data->cur_cmd = cmd;
return TRUE;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) {
if (lrm_state_is_connected(lrm_state) == TRUE) {
rc = lrm_state_poke_connection(lrm_state);
if (rc < 0) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR, pcmk_strerror(rc));
}
} else {
rc = -1;
pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING,
PCMK_EXEC_DONE, "Remote connection inactive");
}
if (rc == 0) {
crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
cmd->rsc_id);
ra_data->cur_cmd = cmd;
cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
return TRUE;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) {
if (pcmk_is_set(ra_data->status, expect_takeover)) {
/* Briefly wait on stop for the takeover event to occur. If the
* takeover event does not occur during the wait period, that's fine;
* it just means that the remote node's lrm_status section is going to get
* cleared, which will require all the resources running on the remote node
* to be explicitly re-detected via probe actions. If the takeover does occur
* successfully, we can leave the status section intact. */
cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
ra_data->cur_cmd = cmd;
return TRUE;
}
handle_remote_ra_stop(lrm_state, cmd);
} else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) {
lrm_remote_clear_flags(lrm_state, takeover_complete);
lrm_remote_set_flags(lrm_state, expect_takeover);
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
} else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD,
PCMK_ACTION_RELOAD_AGENT, NULL)) {
/* Currently the only reloadable parameter is
* PCMK_REMOTE_RA_RECONNECT_INTERVAL, which is only used by the
* scheduler via the CIB, so reloads are a no-op.
*
* @COMPAT DC <2.1.0: We only need to check for "reload" in case
* we're in a rolling upgrade with a DC scheduling "reload" instead
* of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway,
* so this would work for that purpose as well.
*/
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
}
free_cmd(cmd);
}
return TRUE;
}
static void
remote_ra_data_init(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = NULL;
if (lrm_state->remote_ra_data) {
return;
}
ra_data = calloc(1, sizeof(remote_ra_data_t));
ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
lrm_state->remote_ra_data = ra_data;
}
void
remote_ra_cleanup(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (!ra_data) {
return;
}
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
mainloop_destroy_trigger(ra_data->work);
free(ra_data);
lrm_state->remote_ra_data = NULL;
}
gboolean
is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
{
if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
return TRUE;
}
if ((id != NULL) && (lrm_state_find(id) != NULL)
&& !pcmk__str_eq(id, controld_globals.our_nodename, pcmk__str_casei)) {
return TRUE;
}
return FALSE;
}
lrmd_rsc_info_t *
remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
{
lrmd_rsc_info_t *info = NULL;
if ((lrm_state_find(rsc_id))) {
info = calloc(1, sizeof(lrmd_rsc_info_t));
info->id = strdup(rsc_id);
info->type = strdup(REMOTE_LRMD_RA);
info->standard = strdup(PCMK_RESOURCE_CLASS_OCF);
info->provider = strdup("pacemaker");
}
return info;
}
static gboolean
is_remote_ra_supported_action(const char *action)
{
return pcmk__str_any_of(action,
PCMK_ACTION_START,
PCMK_ACTION_STOP,
PCMK_ACTION_MONITOR,
PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM,
PCMK_ACTION_RELOAD_AGENT,
PCMK_ACTION_RELOAD,
NULL);
}
static GList *
fail_all_monitor_cmds(GList * list)
{
GList *rm_list = NULL;
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms > 0)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
rm_list = g_list_append(rm_list, cmd);
}
}
for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR, "Lost connection to remote executor");
crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
report_remote_ra_result(cmd);
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
/* frees only the list data, not the cmds */
g_list_free(rm_list);
return list;
}
static GList *
remove_cmd(GList * list, const char *action, guint interval_ms)
{
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, action, pcmk__str_casei)) {
break;
}
cmd = NULL;
}
if (cmd) {
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
return list;
}
int
remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, guint interval_ms)
{
lrm_state_t *connection_rsc = NULL;
remote_ra_data_t *ra_data = NULL;
connection_rsc = lrm_state_find(rsc_id);
if (!connection_rsc || !connection_rsc->remote_ra_data) {
return -EINVAL;
}
ra_data = connection_rsc->remote_ra_data;
ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
interval_ms);
if (ra_data->cur_cmd &&
(ra_data->cur_cmd->interval_ms == interval_ms) &&
(pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) {
cmd_set_flags(ra_data->cur_cmd, cmd_cancel);
}
return 0;
}
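/*!
* \internal
* \brief Merge a requested recurring monitor with an existing duplicate, if any
*
* \param[in,out] ra_data Remote connection data to search
* \param[in] interval_ms Interval of the requested monitor
* \param[in] userdata User data of the requested monitor
*
* \return Existing monitor command that was updated, or NULL if none was found
*/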
static remote_ra_cmd_t *
handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
const char *userdata)
{
GList *gIter = NULL;
remote_ra_cmd_t *cmd = NULL;
/* There are three places a potential duplicate monitor operation
* could exist:
* 1. the recurring_cmds list, where the op is waiting for its next interval
* 2. the cmds list, where the op is queued to get executed immediately
* 3. cur_cmd, which means the monitor op is in flight right now
*/
if (interval_ms == 0) {
return NULL;
}
if (ra_data->cur_cmd &&
!pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) &&
(ra_data->cur_cmd->interval_ms == interval_ms)
&& pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
cmd = ra_data->cur_cmd;
goto handle_dup;
}
for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
goto handle_dup;
}
}
for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_casei)) {
goto handle_dup;
}
}
return NULL;
handle_dup:
crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms);
/* update the userdata */
if (userdata) {
free(cmd->userdata);
cmd->userdata = strdup(userdata);
}
/* if we've already reported success, generate a new call id */
if (pcmk_is_set(cmd->status, cmd_reported_success)) {
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
cmd_clear_flags(cmd, cmd_reported_success);
}
/* if we have an interval_id set, that means we are in the process of
* waiting for this cmd's next interval. instead of waiting, cancel
* the timer and execute the action immediately */
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
cmd->interval_id = 0;
recurring_helper(cmd);
}
return cmd;
}
/*!
* \internal
* \brief Execute an action using the (internal) ocf:pacemaker:remote agent
*
* \param[in] lrm_state Executor state object for remote connection
* \param[in] rsc_id Connection resource ID
* \param[in] action Action to execute
* \param[in] userdata String to copy and pass to execution callback
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in] timeout_ms Action timeout (in milliseconds)
* \param[in] start_delay_ms Delay (in milliseconds) before executing action
* \param[in,out] params Connection resource parameters
* \param[out] call_id Where to store call ID on success
*
* \return Standard Pacemaker return code
* \note This takes ownership of \p params, which should not be used or freed
* after calling this function.
*/
int
controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id,
const char *action, const char *userdata,
guint interval_ms, int timeout_ms,
int start_delay_ms, lrmd_key_value_t *params,
int *call_id)
{
lrm_state_t *connection_rsc = NULL;
remote_ra_cmd_t *cmd = NULL;
remote_ra_data_t *ra_data = NULL;
*call_id = 0;
CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL)
&& (userdata != NULL) && (call_id != NULL),
lrmd_key_value_freeall(params); return EINVAL);
if (!is_remote_ra_supported_action(action)) {
lrmd_key_value_freeall(params);
return EOPNOTSUPP;
}
connection_rsc = lrm_state_find(rsc_id);
if (connection_rsc == NULL) {
lrmd_key_value_freeall(params);
return ENOTCONN;
}
remote_ra_data_init(connection_rsc);
ra_data = connection_rsc->remote_ra_data;
cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
if (cmd) {
*call_id = cmd->call_id;
lrmd_key_value_freeall(params);
return pcmk_rc_ok;
}
cmd = calloc(1, sizeof(remote_ra_cmd_t));
if (cmd == NULL) {
lrmd_key_value_freeall(params);
return ENOMEM;
}
cmd->owner = strdup(lrm_state->node_name);
cmd->rsc_id = strdup(rsc_id);
cmd->action = strdup(action);
cmd->userdata = strdup(userdata);
if ((cmd->owner == NULL) || (cmd->rsc_id == NULL) || (cmd->action == NULL)
|| (cmd->userdata == NULL)) {
free_cmd(cmd);
lrmd_key_value_freeall(params);
return ENOMEM;
}
cmd->interval_ms = interval_ms;
cmd->timeout = timeout_ms;
cmd->start_delay = start_delay_ms;
cmd->params = params;
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
if (cmd->start_delay) {
cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
}
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
*call_id = cmd->call_id;
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Immediately fail all monitors of a remote node, if proxied here
*
* \param[in] node_name Name of pacemaker_remote node
*/
void
remote_ra_fail(const char *node_name)
{
lrm_state_t *lrm_state = lrm_state_find(node_name);
if (lrm_state && lrm_state_is_connected(lrm_state)) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
crm_info("Failing monitors on Pacemaker Remote node %s", node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
}
}
/* A guest node fencing implied by host fencing looks like:
*
* <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
* on_node="lxc1" on_node_uuid="lxc1">
* <attributes CRM_meta_on_node="lxc1" CRM_meta_on_node_uuid="lxc1"
* CRM_meta_stonith_action="off" crm_feature_set="3.0.12"/>
* <downed>
* <node id="lxc1"/>
* </downed>
* </pseudo_event>
*/
-#define XPATH_PSEUDO_FENCE "/" XML_GRAPH_TAG_PSEUDO_EVENT \
+#define XPATH_PSEUDO_FENCE "/" PCMK__XE_PSEUDO_EVENT \
"[@" PCMK_XA_OPERATION "='stonith']/" XML_GRAPH_TAG_DOWNED "/" PCMK_XE_NODE
/*!
* \internal
* \brief Check a pseudo-action for Pacemaker Remote node side effects
*
* \param[in,out] xml XML of pseudo-action to check
*/
void
remote_ra_process_pseudo(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
if (numXpathResults(search) == 1) {
xmlNode *result = getXpathResult(search, 0);
/* Normally, we handle the necessary side effects of a guest node stop
* action when reporting the remote agent's result. However, if the stop
* is implied due to fencing, it will be a fencing pseudo-event, and
* there won't be a result to report. Handle that case here.
*
* This will result in a duplicate call to remote_node_down() if the
* guest stop was real instead of implied, but that shouldn't hurt.
*
* There is still one corner case that isn't handled: if a guest node
* isn't running any resources when its host is fenced, it will appear
* to be cleanly stopped, so there will be no pseudo-fence, and our
* peer cache state will be incorrect unless and until the guest is
* recovered.
*/
if (result) {
const char *remote = ID(result);
if (remote) {
remote_node_down(remote, DOWN_ERASE_LRM);
}
}
}
freeXpathObject(search);
}
static void
remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
{
xmlNode *update, *state;
int call_opt;
crm_node_t *node;
call_opt = crmd_cib_smart_opt();
node = crm_remote_peer_get(lrm_state->node_name);
CRM_CHECK(node != NULL, return);
update = create_xml_node(NULL, PCMK_XE_STATUS);
state = create_node_state_update(node, node_update_none, update,
__func__);
crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0");
if (controld_update_cib(PCMK_XE_STATUS, update, call_opt,
NULL) == pcmk_rc_ok) {
/* TODO: still not 100% sure that async update will succeed ... */
if (maintenance) {
lrm_remote_set_flags(lrm_state, remote_in_maint);
} else {
lrm_remote_clear_flags(lrm_state, remote_in_maint);
}
}
free_xml(update);
}
-#define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
+#define XPATH_PSEUDO_MAINTENANCE "//" PCMK__XE_PSEUDO_EVENT \
"[@" PCMK_XA_OPERATION "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \
XML_GRAPH_TAG_MAINTENANCE
/*!
* \internal
* \brief Check a pseudo-action holding updates for maintenance state
*
* \param[in,out] xml XML of pseudo-action to check
*/
void
remote_ra_process_maintenance_nodes(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
if (numXpathResults(search) == 1) {
xmlNode *node;
int cnt = 0, cnt_remote = 0;
for (node = first_named_child(getXpathResult(search, 0), PCMK_XE_NODE);
node != NULL; node = crm_next_same_xml(node)) {
lrm_state_t *lrm_state = lrm_state_find(ID(node));
cnt++;
if (lrm_state && lrm_state->remote_ra_data &&
pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) {
int is_maint;
cnt_remote++;
pcmk__scan_min_int(crm_element_value(node, XML_NODE_IS_MAINTENANCE),
&is_maint, 0);
remote_ra_maintenance(lrm_state, is_maint);
}
}
crm_trace("Action holds %d nodes (%d remotes found) adjusting "
PCMK_OPT_MAINTENANCE_MODE,
cnt, cnt_remote);
}
freeXpathObject(search);
}
gboolean
remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return pcmk_is_set(ra_data->status, remote_in_maint);
}
gboolean
remote_ra_controlling_guest(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return pcmk_is_set(ra_data->status, controlling_guest);
}
diff --git a/include/crm_internal.h b/include/crm_internal.h
index fde4dde33b..d97aa683a9 100644
--- a/include/crm_internal.h
+++ b/include/crm_internal.h
@@ -1,218 +1,219 @@
/*
* Copyright 2006-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM_INTERNAL__H
# define CRM_INTERNAL__H
# ifndef PCMK__CONFIG_H
# define PCMK__CONFIG_H
# include <config.h>
# endif
# include <portability.h>
/* Our minimum glib dependency is 2.42. Define that as both the minimum and
* maximum glib APIs that are allowed (i.e. APIs that were already deprecated
* in 2.42, and APIs introduced after 2.42, cannot be used by Pacemaker code).
*/
#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_42
#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_42
# include <glib.h>
# include <stdbool.h>
# include <libxml/tree.h>
/* Public API headers can guard including deprecated API headers with this
* symbol, thus preventing internal code (which includes this header) from using
* deprecated APIs, while still allowing external code to use them by default.
*/
#define PCMK_ALLOW_DEPRECATED 0
# include <crm/lrmd.h>
# include <crm/common/logging.h>
# include <crm/common/logging_internal.h>
# include <crm/common/ipc_internal.h>
# include <crm/common/options_internal.h>
# include <crm/common/output_internal.h>
# include <crm/common/xml_internal.h>
# include <crm/common/internal.h>
# include <locale.h>
# include <gettext.h>
#define N_(String) (String)
#ifdef ENABLE_NLS
# define _(String) gettext(String)
#else
# define _(String) (String)
#endif
/*
* XML element names used only by internal code
*/
#define PCMK__XE_ATTRIBUTES "attributes"
#define PCMK__XE_LRM "lrm"
#define PCMK__XE_LRM_RESOURCE "lrm_resource"
#define PCMK__XE_LRM_RESOURCES "lrm_resources"
#define PCMK__XE_LRM_RSC_OP "lrm_rsc_op"
#define PCMK__XE_NODE_STATE "node_state"
#define PCMK__XE_PARAM "param"
#define PCMK__XE_PING_RESPONSE "ping_response"
+#define PCMK__XE_PSEUDO_EVENT "pseudo_event"
#define PCMK__XE_RSC_OP "rsc_op"
#define PCMK__XE_TRANSITION_GRAPH "transition_graph"
// @COMPAT Deprecated since 2.1.7
#define PCMK__XE_DIFF_ADDED "diff-added"
// @COMPAT Deprecated since 2.1.7
#define PCMK__XE_DIFF_REMOVED "diff-removed"
/* @COMPAT Deprecated since 2.0.0; alias for <clone> with PCMK_META_PROMOTABLE
* set to "true"
*/
#define PCMK__XE_PROMOTABLE_LEGACY "master"
/*
* XML attribute names used only by internal code
*/
#define PCMK__XA_ATTR_DAMPENING "attr_dampening"
#define PCMK__XA_ATTR_FORCE "attrd_is_force_write"
#define PCMK__XA_ATTR_INTERVAL "attr_clear_interval"
#define PCMK__XA_ATTR_IS_PRIVATE "attr_is_private"
#define PCMK__XA_ATTR_IS_REMOTE "attr_is_remote"
#define PCMK__XA_ATTR_NAME "attr_name"
#define PCMK__XA_ATTR_NODE_ID "attr_host_id"
#define PCMK__XA_ATTR_NODE_NAME "attr_host"
#define PCMK__XA_ATTR_OPERATION "attr_clear_operation"
#define PCMK__XA_ATTR_PATTERN "attr_regex"
#define PCMK__XA_ATTR_RESOURCE "attr_resource"
#define PCMK__XA_ATTR_SECTION "attr_section"
#define PCMK__XA_ATTR_SET "attr_set"
#define PCMK__XA_ATTR_SET_TYPE "attr_set_type"
#define PCMK__XA_ATTR_SYNC_POINT "attr_sync_point"
#define PCMK__XA_ATTR_USER "attr_user"
#define PCMK__XA_ATTR_UUID "attr_key"
#define PCMK__XA_ATTR_VALUE "attr_value"
#define PCMK__XA_ATTR_VERSION "attr_version"
#define PCMK__XA_ATTR_WRITER "attr_writer"
#define PCMK__XA_CALL_ID "call-id"
#define PCMK__XA_CONFIG_ERRORS "config-errors"
#define PCMK__XA_CONFIG_WARNINGS "config-warnings"
#define PCMK__XA_CONFIRM "confirm"
#define PCMK__XA_CONN_HOST "connection_host"
#define PCMK__XA_CRMD "crmd"
#define PCMK__XA_CRMD_STATE "crmd_state"
#define PCMK__XA_CRM_HOST_TO "crm_host_to"
#define PCMK__XA_CRM_LIMIT_MAX "crm-limit-max"
#define PCMK__XA_CRM_LIMIT_MODE "crm-limit-mode"
#define PCMK__XA_CRM_SUBSYSTEM "crm_subsystem"
#define PCMK__XA_CRM_SYS_FROM "crm_sys_from"
#define PCMK__XA_CRM_SYS_TO "crm_sys_to"
#define PCMK__XA_CRM_TASK "crm_task"
#define PCMK__XA_CRM_TGRAPH_IN "crm-tgraph-in"
#define PCMK__XA_CRM_USER "crm_user"
#define PCMK__XA_DC_LEAVING "dc-leaving"
#define PCMK__XA_DIGEST "digest"
#define PCMK__XA_ELECTION_AGE_SEC "election-age-sec"
#define PCMK__XA_ELECTION_AGE_NANO_SEC "election-age-nano-sec"
#define PCMK__XA_ELECTION_ID "election-id"
#define PCMK__XA_ELECTION_OWNER "election-owner"
#define PCMK__XA_EXPECTED "expected"
#define PCMK__XA_FILE "file"
#define PCMK__XA_GRAPH_ERRORS "graph-errors"
#define PCMK__XA_GRAPH_WARNINGS "graph-warnings"
#define PCMK__XA_IN_CCM "in_ccm"
#define PCMK__XA_JOIN "join"
#define PCMK__XA_JOIN_ID "join_id"
#define PCMK__XA_LONG_ID "long-id"
#define PCMK__XA_MODE "mode"
#define PCMK__XA_NODE_START_STATE "node_start_state"
#define PCMK__XA_NODE_STATE "node_state"
#define PCMK__XA_OPERATION_KEY "operation_key"
#define PCMK__XA_OP_DIGEST "op-digest"
#define PCMK__XA_OP_FORCE_RESTART "op-force-restart"
#define PCMK__XA_OP_RESTART_DIGEST "op-restart-digest"
#define PCMK__XA_OP_SECURE_DIGEST "op-secure-digest"
#define PCMK__XA_OP_SECURE_PARAMS "op-secure-params"
#define PCMK__XA_OP_STATUS "op-status"
#define PCMK__XA_PACEMAKERD_STATE "pacemakerd_state"
#define PCMK__XA_PRIORITY "priority"
#define PCMK__XA_RC_CODE "rc-code"
#define PCMK__XA_REAP "reap"
/* Actions to be executed on Pacemaker Remote nodes are routed through the
* controller on the cluster node hosting the remote connection. That cluster
* node is considered the router node for the action.
*/
#define PCMK__XA_ROUTER_NODE "router_node"
#define PCMK__XA_RSC_ID "rsc-id"
#define PCMK__XA_SCHEMA "schema"
#define PCMK__XA_SCHEMAS "schemas"
#define PCMK__XA_SRC "src"
#define PCMK__XA_SUBT "subt" // subtype
#define PCMK__XA_T "t" // type
#define PCMK__XA_TASK "task"
#define PCMK__XA_TRANSITION_KEY "transition-key"
#define PCMK__XA_TRANSITION_MAGIC "transition-magic"
#define PCMK__XA_UPTIME "uptime"
// @COMPAT Deprecated since 2.1.5
#define PCMK__XA_FIRST_INSTANCE "first-instance"
// @COMPAT Deprecated since 1.1.12
#define PCMK__XA_REF "ref"
// @COMPAT Deprecated since 2.1.6
#define PCMK__XA_REPLACE "replace"
// @COMPAT Deprecated since 2.1.5
#define PCMK__XA_RSC_INSTANCE "rsc-instance"
// @COMPAT Deprecated since 1.1.12
#define PCMK__XA_TAG "tag"
// @COMPAT Deprecated since 2.1.5
#define PCMK__XA_THEN_INSTANCE "then-instance"
// @COMPAT Deprecated since 2.1.5
#define PCMK__XA_WITH_RSC_INSTANCE "with-rsc-instance"
/*
* IPC service names that are only used internally
*/
# define PCMK__SERVER_BASED_RO "cib_ro"
# define PCMK__SERVER_BASED_RW "cib_rw"
# define PCMK__SERVER_BASED_SHM "cib_shm"
/*
* IPC commands that can be sent to Pacemaker daemons
*/
#define PCMK__ATTRD_CMD_PEER_REMOVE "peer-remove"
#define PCMK__ATTRD_CMD_UPDATE "update"
#define PCMK__ATTRD_CMD_UPDATE_BOTH "update-both"
#define PCMK__ATTRD_CMD_UPDATE_DELAY "update-delay"
#define PCMK__ATTRD_CMD_QUERY "query"
#define PCMK__ATTRD_CMD_REFRESH "refresh"
#define PCMK__ATTRD_CMD_FLUSH "flush"
#define PCMK__ATTRD_CMD_SYNC "sync"
#define PCMK__ATTRD_CMD_SYNC_RESPONSE "sync-response"
#define PCMK__ATTRD_CMD_CLEAR_FAILURE "clear-failure"
#define PCMK__ATTRD_CMD_CONFIRM "confirm"
#define PCMK__CONTROLD_CMD_NODES "list-nodes"
#endif /* CRM_INTERNAL__H */
diff --git a/lib/pacemaker/pcmk_graph_consumer.c b/lib/pacemaker/pcmk_graph_consumer.c
index ae4e17182a..d2b1a6461e 100644
--- a/lib/pacemaker/pcmk_graph_consumer.c
+++ b/lib/pacemaker/pcmk_graph_consumer.c
@@ -1,883 +1,883 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/lrmd_internal.h>
#include <pacemaker-internal.h>
/*
* Functions for updating graph
*/
/*!
* \internal
* \brief Update synapse after completed prerequisite
*
* A synapse is ready to be executed once all its prerequisite actions (inputs)
* complete. Given a completed action, check whether it is an input for a given
* synapse, and if so, mark the input as confirmed, and mark the synapse as
* ready if appropriate.
*
* \param[in,out] synapse Transition graph synapse to update
* \param[in] action_id ID of an action that completed
*
* \note The only substantial effect here is confirming synapse inputs.
* should_fire_synapse() will recalculate pcmk__synapse_ready, so the only
* thing that uses the pcmk__synapse_ready flag set here is
* synapse_state_str().
*/
static void
update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id)
{
if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
return; // All inputs have already been confirmed
}
// Presume ready until proven otherwise
pcmk__set_synapse_flags(synapse, pcmk__synapse_ready);
for (GList *lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) {
pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data;
if (prereq->id == action_id) {
crm_trace("Confirming input %d of synapse %d",
action_id, synapse->id);
pcmk__set_graph_action_flags(prereq, pcmk__graph_action_confirmed);
} else if (!pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed)) {
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
crm_trace("Synapse %d still not ready after action %d",
synapse->id, action_id);
}
}
if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
crm_trace("Synapse %d is now ready to execute", synapse->id);
}
}
/*!
* \internal
* \brief Update action and synapse confirmation after action completion
*
* \param[in,out] synapse Transition graph synapse that action belongs to
* \param[in] action_id ID of action that completed
*/
static void
update_synapse_confirmed(pcmk__graph_synapse_t *synapse, int action_id)
{
bool all_confirmed = true;
for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) {
pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data;
if (action->id == action_id) {
crm_trace("Confirmed action %d of synapse %d",
action_id, synapse->id);
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
} else if (all_confirmed &&
!pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) {
all_confirmed = false;
crm_trace("Synapse %d still not confirmed after action %d",
synapse->id, action_id);
}
}
if (all_confirmed
&& !pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
crm_trace("Confirmed synapse %d", synapse->id);
pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
}
}
/*!
* \internal
* \brief Update the transition graph with a completed action result
*
* \param[in,out] graph Transition graph to update
* \param[in] action Action that completed
*/
void
pcmk__update_graph(pcmk__graph_t *graph, const pcmk__graph_action_t *action)
{
for (GList *lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
if (pcmk_any_flags_set(synapse->flags,
pcmk__synapse_confirmed|pcmk__synapse_failed)) {
continue; // This synapse already completed
} else if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
update_synapse_confirmed(synapse, action->id);
} else if (!pcmk_is_set(action->flags, pcmk__graph_action_failed)
|| (synapse->priority == INFINITY)) {
update_synapse_ready(synapse, action->id);
}
}
}
/*
* Functions for executing graph
*/
/* A transition graph consists of various types of actions. The library caller
* registers execution functions for each action type, which will be stored
* here.
*/
static pcmk__graph_functions_t *graph_fns = NULL;
/*!
* \internal
* \brief Set transition graph execution functions
*
* \param[in] fns Execution functions to use
*/
void
pcmk__set_graph_functions(pcmk__graph_functions_t *fns)
{
crm_debug("Setting custom functions for executing transition graphs");
graph_fns = fns;
CRM_ASSERT(graph_fns != NULL);
CRM_ASSERT(graph_fns->rsc != NULL);
CRM_ASSERT(graph_fns->cluster != NULL);
CRM_ASSERT(graph_fns->pseudo != NULL);
CRM_ASSERT(graph_fns->fence != NULL);
}
/*!
* \internal
* \brief Check whether a graph synapse is ready to be executed
*
* \param[in,out] graph Transition graph that synapse is part of
* \param[in,out] synapse Synapse to check
*
* \return true if synapse is ready, false otherwise
*/
static bool
should_fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
{
GList *lpc = NULL;
pcmk__set_synapse_flags(synapse, pcmk__synapse_ready);
for (lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) {
pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data;
if (!(pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed))) {
crm_trace("Input %d for synapse %d not yet confirmed",
prereq->id, synapse->id);
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
break;
} else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed)
&& !pcmk_is_set(prereq->flags,
pcmk__graph_action_can_fail)) {
crm_trace("Input %d for synapse %d confirmed but failed",
prereq->id, synapse->id);
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
break;
}
}
if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
crm_trace("Synapse %d is ready to execute", synapse->id);
} else {
return false;
}
for (lpc = synapse->actions; lpc != NULL; lpc = lpc->next) {
pcmk__graph_action_t *a = (pcmk__graph_action_t *) lpc->data;
if (a->type == pcmk__pseudo_graph_action) {
/* None of the below applies to pseudo ops */
} else if (synapse->priority < graph->abort_priority) {
crm_trace("Skipping synapse %d: priority %d is less than "
"abort priority %d",
synapse->id, synapse->priority, graph->abort_priority);
graph->skipped++;
return false;
} else if (graph_fns->allowed && !(graph_fns->allowed(graph, a))) {
crm_trace("Deferring synapse %d: not allowed", synapse->id);
return false;
}
}
return true;
}
/*!
* \internal
* \brief Initiate an action from a transition graph
*
* \param[in,out] graph Transition graph containing action
* \param[in,out] action Action to execute
*
* \return Standard Pacemaker return code
*/
static int
initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
{
const char *id = ID(action->xml);
CRM_CHECK(id != NULL, return EINVAL);
CRM_CHECK(!pcmk_is_set(action->flags, pcmk__graph_action_executed),
return pcmk_rc_already);
pcmk__set_graph_action_flags(action, pcmk__graph_action_executed);
switch (action->type) {
case pcmk__pseudo_graph_action:
crm_trace("Executing pseudo-action %d (%s)", action->id, id);
return graph_fns->pseudo(graph, action);
case pcmk__rsc_graph_action:
crm_trace("Executing resource action %d (%s)", action->id, id);
return graph_fns->rsc(graph, action);
case pcmk__cluster_graph_action:
if (pcmk__str_eq(crm_element_value(action->xml, PCMK_XA_OPERATION),
PCMK_ACTION_STONITH, pcmk__str_none)) {
crm_trace("Executing fencing action %d (%s)",
action->id, id);
return graph_fns->fence(graph, action);
}
crm_trace("Executing cluster action %d (%s)", action->id, id);
return graph_fns->cluster(graph, action);
default:
crm_err("Unsupported graph action type <%s " PCMK_XA_ID "='%s'> "
"(bug?)",
action->xml->name, id);
return EINVAL;
}
}
/*!
* \internal
* \brief Execute a graph synapse
*
* \param[in,out] graph Transition graph with synapse to execute
* \param[in,out] synapse Synapse to execute
*
* \return Standard Pacemaker return code
*/
static int
fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
{
pcmk__set_synapse_flags(synapse, pcmk__synapse_executed);
for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) {
pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data;
int rc = initiate_action(graph, action);
if (rc != pcmk_rc_ok) {
crm_err("Failed initiating <%s " PCMK_XA_ID "=%d> in synapse %d: "
"%s",
action->xml->name, action->id, synapse->id,
pcmk_rc_str(rc));
pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
pcmk__set_graph_action_flags(action,
pcmk__graph_action_confirmed
|pcmk__graph_action_failed);
return pcmk_rc_error;
}
}
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Dummy graph method that can be used with simulations
*
* \param[in,out] graph Transition graph containing action
* \param[in,out] action Graph action to be initiated
*
* \return Standard Pacemaker return code
* \note If the PE_fail environment variable is set to the action ID,
* then the graph action will be marked as failed.
*/
static int
pseudo_action_dummy(pcmk__graph_t *graph, pcmk__graph_action_t *action)
{
static int fail = -1;
if (fail < 0) {
long long fail_ll;
if ((pcmk__scan_ll(getenv("PE_fail"), &fail_ll, 0LL) == pcmk_rc_ok)
&& (fail_ll > 0LL) && (fail_ll <= INT_MAX)) {
fail = (int) fail_ll;
} else {
fail = 0;
}
}
if (action->id == fail) {
crm_err("Dummy event handler: pretending action %d failed", action->id);
pcmk__set_graph_action_flags(action, pcmk__graph_action_failed);
graph->abort_priority = INFINITY;
} else {
crm_trace("Dummy event handler: action %d initiated", action->id);
}
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
pcmk__update_graph(graph, action);
return pcmk_rc_ok;
}
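/* Illustrative sketch only: one way the PE_fail hook above could be exercised
 * from a simulation. The value is read once and cached the first time a dummy
 * action runs, so it must be set before the first execution pass; setenv()
 * comes from <stdlib.h>, which the getenv() call above implies is available.
 * The helper name is hypothetical.
 */
static void
execute_with_forced_failure(pcmk__graph_t *graph, int action_id)
{
    char *value = crm_strdup_printf("%d", action_id);

    setenv("PE_fail", value, 1);    // consumed by pseudo_action_dummy()
    free(value);
    pcmk__execute_graph(graph);     // the matching action is marked as failed
}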
static pcmk__graph_functions_t default_fns = {
pseudo_action_dummy,
pseudo_action_dummy,
pseudo_action_dummy,
pseudo_action_dummy
};
/*!
* \internal
* \brief Execute all actions in a transition graph
*
* \param[in,out] graph Transition graph to execute
*
* \return Status of transition after execution
*/
enum pcmk__graph_status
pcmk__execute_graph(pcmk__graph_t *graph)
{
GList *lpc = NULL;
int log_level = LOG_DEBUG;
enum pcmk__graph_status pass_result = pcmk__graph_active;
const char *status = "In progress";
if (graph_fns == NULL) {
graph_fns = &default_fns;
}
if (graph == NULL) {
return pcmk__graph_complete;
}
graph->fired = 0;
graph->pending = 0;
graph->skipped = 0;
graph->completed = 0;
graph->incomplete = 0;
// Count completed and in-flight synapses
for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
if (pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
graph->completed++;
} else if (!pcmk_is_set(synapse->flags, pcmk__synapse_failed)
&& pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
graph->pending++;
}
}
crm_trace("Executing graph %d (%d synapses already completed, %d pending)",
graph->id, graph->completed, graph->pending);
// Execute any synapses that are ready
for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
if ((graph->batch_limit > 0)
&& (graph->pending >= graph->batch_limit)) {
crm_debug("Throttling graph execution: batch limit (%d) reached",
graph->batch_limit);
break;
} else if (pcmk_is_set(synapse->flags, pcmk__synapse_failed)) {
graph->skipped++;
continue;
} else if (pcmk_any_flags_set(synapse->flags,
pcmk__synapse_confirmed
|pcmk__synapse_executed)) {
continue; // Already handled
} else if (should_fire_synapse(graph, synapse)) {
graph->fired++;
if (fire_synapse(graph, synapse) != pcmk_rc_ok) {
crm_err("Synapse %d failed to fire", synapse->id);
log_level = LOG_ERR;
graph->abort_priority = INFINITY;
graph->incomplete++;
graph->fired--;
}
if (!(pcmk_is_set(synapse->flags, pcmk__synapse_confirmed))) {
graph->pending++;
}
} else {
crm_trace("Synapse %d cannot fire", synapse->id);
graph->incomplete++;
}
}
if ((graph->pending == 0) && (graph->fired == 0)) {
graph->complete = true;
if ((graph->incomplete != 0) && (graph->abort_priority <= 0)) {
log_level = LOG_WARNING;
pass_result = pcmk__graph_terminated;
status = "Terminated";
} else if (graph->skipped != 0) {
log_level = LOG_NOTICE;
pass_result = pcmk__graph_complete;
status = "Stopped";
} else {
log_level = LOG_NOTICE;
pass_result = pcmk__graph_complete;
status = "Complete";
}
} else if (graph->fired == 0) {
pass_result = pcmk__graph_pending;
}
do_crm_log(log_level,
"Transition %d (Complete=%d, Pending=%d,"
" Fired=%d, Skipped=%d, Incomplete=%d, Source=%s): %s",
graph->id, graph->completed, graph->pending, graph->fired,
graph->skipped, graph->incomplete, graph->source, status);
return pass_result;
}
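/* Illustrative sketch, assuming the caller already holds transition graph XML
 * (for example, scheduler output): unpack it, drive it with the built-in
 * dummy handlers (graph_fns falls back to default_fns when no functions have
 * been registered), and free it. The helper name is hypothetical.
 */
static void
run_graph_with_defaults(const xmlNode *graph_xml)
{
    pcmk__graph_t *graph = pcmk__unpack_graph(graph_xml, "sketch");

    if (graph == NULL) {
        return;
    }
    /* The dummy handlers confirm every action synchronously, so repeated
     * passes quickly drive the graph to completion (or termination).
     */
    while (pcmk__execute_graph(graph) == pcmk__graph_active) {
    }
    pcmk__free_graph(graph);
}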
/*
* Functions for unpacking transition graph XML into structs
*/
/*!
* \internal
* \brief Unpack a transition graph action from XML
*
* \param[in] parent Synapse that action is part of
* \param[in] xml_action Action XML to unpack
*
* \return Newly allocated action on success, or NULL otherwise
*/
static pcmk__graph_action_t *
unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
{
enum pcmk__graph_action_type action_type;
pcmk__graph_action_t *action = NULL;
const char *value = ID(xml_action);
if (value == NULL) {
crm_err("Ignoring transition graph action without id (bug?)");
crm_log_xml_trace(xml_action, "invalid");
return NULL;
}
if (pcmk__xe_is(xml_action, PCMK__XE_RSC_OP)) {
action_type = pcmk__rsc_graph_action;
- } else if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_PSEUDO_EVENT)) {
+ } else if (pcmk__xe_is(xml_action, PCMK__XE_PSEUDO_EVENT)) {
action_type = pcmk__pseudo_graph_action;
} else if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_CRM_EVENT)) {
action_type = pcmk__cluster_graph_action;
} else {
crm_err("Ignoring transition graph action of unknown type '%s' (bug?)",
xml_action->name);
crm_log_xml_trace(xml_action, "invalid");
return NULL;
}
action = calloc(1, sizeof(pcmk__graph_action_t));
if (action == NULL) {
crm_perror(LOG_CRIT, "Cannot unpack transition graph action");
crm_log_xml_trace(xml_action, "lost");
return NULL;
}
pcmk__scan_min_int(value, &(action->id), -1);
action->type = pcmk__rsc_graph_action;
action->xml = copy_xml(xml_action);
action->synapse = parent;
action->type = action_type;
action->params = xml2list(action->xml);
value = crm_meta_value(action->params, PCMK_META_TIMEOUT);
pcmk__scan_min_int(value, &(action->timeout), 0);
/* Take PCMK_META_START_DELAY into account for the timeout of the action
* timer
*/
value = crm_meta_value(action->params, PCMK_META_START_DELAY);
{
int start_delay;
pcmk__scan_min_int(value, &start_delay, 0);
action->timeout += start_delay;
}
if (pcmk__guint_from_hash(action->params, CRM_META "_" PCMK_META_INTERVAL,
0, &(action->interval_ms)) != pcmk_rc_ok) {
action->interval_ms = 0;
}
value = crm_meta_value(action->params, PCMK__META_CAN_FAIL);
if (value != NULL) {
int can_fail = 0;
if ((crm_str_to_boolean(value, &can_fail) > 0) && (can_fail > 0)) {
pcmk__set_graph_action_flags(action, pcmk__graph_action_can_fail);
} else {
pcmk__clear_graph_action_flags(action, pcmk__graph_action_can_fail);
}
#ifndef PCMK__COMPAT_2_0
if (pcmk_is_set(action->flags, pcmk__graph_action_can_fail)) {
crm_warn("Support for the " PCMK__META_CAN_FAIL " meta-attribute "
"is deprecated and will be removed in a future release");
}
#endif
}
crm_trace("Action %d has timer set to %dms", action->id, action->timeout);
return action;
}
/*!
* \internal
* \brief Unpack transition graph synapse from XML
*
* \param[in,out] new_graph Transition graph that synapse is part of
* \param[in] xml_synapse Synapse XML
*
* \return Newly allocated synapse on success, or NULL otherwise
*/
static pcmk__graph_synapse_t *
unpack_synapse(pcmk__graph_t *new_graph, const xmlNode *xml_synapse)
{
const char *value = NULL;
xmlNode *action_set = NULL;
pcmk__graph_synapse_t *new_synapse = NULL;
crm_trace("Unpacking synapse %s", ID(xml_synapse));
new_synapse = calloc(1, sizeof(pcmk__graph_synapse_t));
if (new_synapse == NULL) {
return NULL;
}
pcmk__scan_min_int(ID(xml_synapse), &(new_synapse->id), 0);
value = crm_element_value(xml_synapse, PCMK__XA_PRIORITY);
pcmk__scan_min_int(value, &(new_synapse->priority), 0);
CRM_CHECK(new_synapse->id >= 0, free(new_synapse);
return NULL);
new_graph->num_synapses++;
crm_trace("Unpacking synapse %s action sets",
crm_element_value(xml_synapse, PCMK_XA_ID));
for (action_set = first_named_child(xml_synapse, "action_set");
action_set != NULL; action_set = crm_next_same_xml(action_set)) {
for (xmlNode *action = pcmk__xml_first_child(action_set);
action != NULL; action = pcmk__xml_next(action)) {
pcmk__graph_action_t *new_action = unpack_action(new_synapse,
action);
if (new_action == NULL) {
continue;
}
crm_trace("Adding action %d to synapse %d",
new_action->id, new_synapse->id);
new_graph->num_actions++;
new_synapse->actions = g_list_append(new_synapse->actions,
new_action);
}
}
crm_trace("Unpacking synapse %s inputs", ID(xml_synapse));
for (xmlNode *inputs = first_named_child(xml_synapse, "inputs");
inputs != NULL; inputs = crm_next_same_xml(inputs)) {
for (xmlNode *trigger = first_named_child(inputs, "trigger");
trigger != NULL; trigger = crm_next_same_xml(trigger)) {
for (xmlNode *input = pcmk__xml_first_child(trigger);
input != NULL; input = pcmk__xml_next(input)) {
pcmk__graph_action_t *new_input = unpack_action(new_synapse,
input);
if (new_input == NULL) {
continue;
}
crm_trace("Adding input %d to synapse %d",
new_input->id, new_synapse->id);
new_synapse->inputs = g_list_append(new_synapse->inputs,
new_input);
}
}
}
return new_synapse;
}
/*!
* \internal
* \brief Unpack transition graph XML
*
* \param[in] xml_graph Transition graph XML to unpack
* \param[in] reference Where the XML came from (for logging)
*
* \return Newly allocated transition graph on success, NULL otherwise
* \note The caller is responsible for freeing the return value using
* pcmk__free_graph().
* \note The XML is expected to be structured like:
<transition_graph ...>
<synapse id="0">
<action_set>
<rsc_op id="2" ...>
...
</action_set>
<inputs>
<rsc_op id="1" ...
...
</inputs>
</synapse>
...
</transition_graph>
*/
pcmk__graph_t *
pcmk__unpack_graph(const xmlNode *xml_graph, const char *reference)
{
pcmk__graph_t *new_graph = NULL;
new_graph = calloc(1, sizeof(pcmk__graph_t));
if (new_graph == NULL) {
return NULL;
}
new_graph->source = strdup((reference == NULL)? "unknown" : reference);
if (new_graph->source == NULL) {
free(new_graph);
return NULL;
}
new_graph->id = -1;
new_graph->abort_priority = 0;
new_graph->network_delay = 0;
new_graph->stonith_timeout = 0;
new_graph->completion_action = pcmk__graph_done;
// Parse top-level attributes from PCMK__XE_TRANSITION_GRAPH
if (xml_graph != NULL) {
const char *buf = crm_element_value(xml_graph, "transition_id");
CRM_CHECK(buf != NULL, free(new_graph);
return NULL);
pcmk__scan_min_int(buf, &(new_graph->id), -1);
buf = crm_element_value(xml_graph, PCMK_OPT_CLUSTER_DELAY);
CRM_CHECK(buf != NULL, free(new_graph);
return NULL);
pcmk_parse_interval_spec(buf, &(new_graph->network_delay));
buf = crm_element_value(xml_graph, PCMK_OPT_STONITH_TIMEOUT);
if (buf == NULL) {
new_graph->stonith_timeout = new_graph->network_delay;
} else {
pcmk_parse_interval_spec(buf, &(new_graph->stonith_timeout));
}
// Use 0 (dynamic limit) as default/invalid, -1 (no limit) as minimum
buf = crm_element_value(xml_graph, PCMK_OPT_BATCH_LIMIT);
if ((buf == NULL)
|| (pcmk__scan_min_int(buf, &(new_graph->batch_limit),
-1) != pcmk_rc_ok)) {
new_graph->batch_limit = 0;
}
buf = crm_element_value(xml_graph, PCMK_OPT_MIGRATION_LIMIT);
pcmk__scan_min_int(buf, &(new_graph->migration_limit), -1);
pcmk__str_update(&(new_graph->failed_stop_offset),
crm_element_value(xml_graph, "failed-stop-offset"));
pcmk__str_update(&(new_graph->failed_start_offset),
crm_element_value(xml_graph, "failed-start-offset"));
if (crm_element_value_epoch(xml_graph, "recheck-by",
&(new_graph->recheck_by)) != pcmk_ok) {
new_graph->recheck_by = 0;
}
}
// Unpack each child <synapse> element
for (const xmlNode *synapse_xml = first_named_child(xml_graph, "synapse");
synapse_xml != NULL; synapse_xml = crm_next_same_xml(synapse_xml)) {
pcmk__graph_synapse_t *new_synapse = unpack_synapse(new_graph,
synapse_xml);
if (new_synapse != NULL) {
new_graph->synapses = g_list_append(new_graph->synapses,
new_synapse);
}
}
crm_debug("Unpacked transition %d from %s: %d actions in %d synapses",
new_graph->id, new_graph->source, new_graph->num_actions,
new_graph->num_synapses);
return new_graph;
}
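/* Illustrative sketch of feeding the unpacker directly from a string. The
 * attribute and element spellings ("cluster-delay", <pseudo_event>, etc.) are
 * assumptions about the usual values behind the corresponding constants;
 * string2xml() is the libcrmcommon string parser. Only transition_id and the
 * cluster delay are strictly required by the checks above.
 */
static pcmk__graph_t *
unpack_example_graph(void)
{
    const char *xml_s =
        "<transition_graph transition_id='0' cluster-delay='60s'>"
        "  <synapse id='0' priority='0'>"
        "    <action_set>"
        "      <pseudo_event id='1' operation='all_stopped'/>"
        "    </action_set>"
        "    <inputs/>"
        "  </synapse>"
        "</transition_graph>";
    xmlNode *xml = string2xml(xml_s);
    pcmk__graph_t *graph = pcmk__unpack_graph(xml, "example");

    free_xml(xml);
    return graph;
}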
/*
* Functions for freeing transition graph objects
*/
/*!
* \internal
* \brief Free a transition graph action object
*
* \param[in,out] user_data Action to free
*/
static void
free_graph_action(gpointer user_data)
{
pcmk__graph_action_t *action = user_data;
if (action->timer != 0) {
crm_warn("Cancelling timer for graph action %d", action->id);
g_source_remove(action->timer);
}
if (action->params != NULL) {
g_hash_table_destroy(action->params);
}
free_xml(action->xml);
free(action);
}
/*!
* \internal
* \brief Free a transition graph synapse object
*
* \param[in,out] user_data Synapse to free
*/
static void
free_graph_synapse(gpointer user_data)
{
pcmk__graph_synapse_t *synapse = user_data;
g_list_free_full(synapse->actions, free_graph_action);
g_list_free_full(synapse->inputs, free_graph_action);
free(synapse);
}
/*!
* \internal
* \brief Free a transition graph object
*
* \param[in,out] graph Transition graph to free
*/
void
pcmk__free_graph(pcmk__graph_t *graph)
{
if (graph != NULL) {
g_list_free_full(graph->synapses, free_graph_synapse);
free(graph->source);
free(graph->failed_stop_offset);
free(graph->failed_start_offset);
free(graph);
}
}
/*
* Other transition graph utilities
*/
/*!
* \internal
* \brief Synthesize an executor event from a graph action
*
* \param[in] resource If not NULL, use a call ID greater than the largest one in this XML
* \param[in] action Graph action
* \param[in] status What to use as event execution status
* \param[in] rc What to use as event exit status
* \param[in] exit_reason What to use as event exit reason
*
* \return Newly allocated executor event on success, or NULL otherwise
*/
lrmd_event_data_t *
pcmk__event_from_graph_action(const xmlNode *resource,
const pcmk__graph_action_t *action,
int status, int rc, const char *exit_reason)
{
lrmd_event_data_t *op = NULL;
GHashTableIter iter;
const char *name = NULL;
const char *value = NULL;
xmlNode *action_resource = NULL;
CRM_CHECK(action != NULL, return NULL);
CRM_CHECK(action->type == pcmk__rsc_graph_action, return NULL);
action_resource = first_named_child(action->xml, PCMK_XE_PRIMITIVE);
CRM_CHECK(action_resource != NULL, crm_log_xml_warn(action->xml, "invalid");
return NULL);
op = lrmd_new_event(ID(action_resource),
crm_element_value(action->xml, PCMK_XA_OPERATION),
action->interval_ms);
lrmd__set_result(op, rc, status, exit_reason);
op->t_run = time(NULL);
op->t_rcchange = op->t_run;
op->params = pcmk__strkey_table(free, free);
g_hash_table_iter_init(&iter, action->params);
while (g_hash_table_iter_next(&iter, (void **)&name, (void **)&value)) {
g_hash_table_insert(op->params, strdup(name), strdup(value));
}
for (xmlNode *xop = pcmk__xml_first_child(resource); xop != NULL;
xop = pcmk__xml_next(xop)) {
int tmp = 0;
crm_element_value_int(xop, PCMK__XA_CALL_ID, &tmp);
crm_debug("Got call_id=%d for %s", tmp, ID(resource));
if (tmp > op->call_id) {
op->call_id = tmp;
}
}
op->call_id++;
return op;
}
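/* Illustrative sketch: synthesize a failed result for a resource action that
 * never actually ran (for instance because its node became unreachable), the
 * kind of event the controller feeds back into the graph. PCMK_EXEC_ERROR and
 * PCMK_OCF_UNKNOWN_ERROR are assumed to be the usual execution-status and
 * exit-code constants; the action must be a resource action with a primitive
 * child, as the checks above require. The helper name is hypothetical.
 */
static lrmd_event_data_t *
synthesize_failure(const pcmk__graph_action_t *action)
{
    return pcmk__event_from_graph_action(NULL, action, PCMK_EXEC_ERROR,
                                         PCMK_OCF_UNKNOWN_ERROR,
                                         "sketch: node unreachable");
}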
diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c
index c9557fc219..a8cb1e6584 100644
--- a/lib/pacemaker/pcmk_graph_producer.c
+++ b/lib/pacemaker/pcmk_graph_producer.c
@@ -1,1096 +1,1096 @@
/*
* Copyright 2004-2024 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <glib.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
// Convenience macros for logging action properties
#define action_type_str(flags) \
(pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
(pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
(pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
/*!
* \internal
* \brief Add an XML node tag for a specified ID
*
* \param[in] id Node UUID to add
* \param[in,out] xml Parent XML tag to add to
*/
static xmlNode*
add_node_to_xml_by_id(const char *id, xmlNode *xml)
{
xmlNode *node_xml;
node_xml = create_xml_node(xml, PCMK_XE_NODE);
crm_xml_add(node_xml, PCMK_XA_ID, id);
return node_xml;
}
/*!
* \internal
* \brief Add an XML node tag for a specified node
*
* \param[in] node Node to add
* \param[in,out] xml XML to add node to
*/
static void
add_node_to_xml(const pcmk_node_t *node, void *xml)
{
add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
}
/*!
* \internal
* \brief Count (optionally add to XML) nodes needing maintenance state update
*
* \param[in,out] xml Parent XML tag to add to, if any
* \param[in] scheduler Scheduler data
*
* \return Count of nodes added
* \note Only Pacemaker Remote nodes are considered currently
*/
static int
add_maintenance_nodes(xmlNode *xml, const pcmk_scheduler_t *scheduler)
{
xmlNode *maintenance = NULL;
int count = 0;
if (xml != NULL) {
maintenance = create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE);
}
for (const GList *iter = scheduler->nodes;
iter != NULL; iter = iter->next) {
const pcmk_node_t *node = iter->data;
if (pe__is_guest_or_remote_node(node) &&
(node->details->maintenance != node->details->remote_maintenance)) {
if (maintenance != NULL) {
crm_xml_add(add_node_to_xml_by_id(node->details->id,
maintenance),
XML_NODE_IS_MAINTENANCE,
(node->details->maintenance? "1" : "0"));
}
count++;
}
}
crm_trace("%s %d nodes in need of maintenance mode update in state",
((maintenance == NULL)? "Counted" : "Added"), count);
return count;
}
/*!
* \internal
* \brief Add pseudo action with nodes needing maintenance state update
*
* \param[in,out] scheduler Scheduler data
*/
static void
add_maintenance_update(pcmk_scheduler_t *scheduler)
{
pcmk_action_t *action = NULL;
if (add_maintenance_nodes(NULL, scheduler) != 0) {
action = get_pseudo_op(PCMK_ACTION_MAINTENANCE_NODES, scheduler);
pcmk__set_action_flags(action, pcmk_action_always_in_graph);
}
}
/*!
* \internal
* \brief Add XML with nodes that an action is expected to bring down
*
* If a specified action is expected to bring any nodes down, add an XML block
* with their UUIDs. When a node is lost, this allows the controller to
* determine whether it was expected.
*
* \param[in,out] xml Parent XML tag to add to
* \param[in] action Action to check for downed nodes
*/
static void
add_downed_nodes(xmlNode *xml, const pcmk_action_t *action)
{
CRM_CHECK((xml != NULL) && (action != NULL) && (action->node != NULL),
return);
if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {
/* Shutdown makes the action's node down */
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
pcmk__str_none)) {
/* Fencing makes the action's node and any hosted guest nodes down */
const char *fence = g_hash_table_lookup(action->meta, "stonith_action");
if (pcmk__is_fencing_action(fence)) {
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
pe_foreach_guest_node(action->node->details->data_set,
action->node, add_node_to_xml, downed);
}
} else if (action->rsc && action->rsc->is_remote_node
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP,
pcmk__str_none)) {
/* Stopping a remote connection resource makes the connected node down,
* unless it's part of a migration
*/
GList *iter;
pcmk_action_t *input;
bool migrating = false;
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
input = ((pcmk__related_action_t *) iter->data)->action;
if ((input->rsc != NULL)
&& pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_none)
&& pcmk__str_eq(input->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
migrating = true;
break;
}
}
if (!migrating) {
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->rsc->id, downed);
}
}
}
/*!
* \internal
* \brief Create a transition graph operation key for a clone action
*
* \param[in] action Clone action
* \param[in] interval_ms Action interval in milliseconds
*
* \return Newly allocated string with transition graph operation key
*/
static char *
clone_op_key(const pcmk_action_t *action, guint interval_ms)
{
if (pcmk__str_eq(action->task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
const char *n_task = g_hash_table_lookup(action->meta,
"notify_operation");
CRM_LOG_ASSERT((n_type != NULL) && (n_task != NULL));
return pcmk__notify_key(action->rsc->clone_name, n_type, n_task);
} else if (action->cancel_task != NULL) {
return pcmk__op_key(action->rsc->clone_name, action->cancel_task,
interval_ms);
} else {
return pcmk__op_key(action->rsc->clone_name, action->task, interval_ms);
}
}
/*!
* \internal
* \brief Add node details to transition graph action XML
*
* \param[in] action Scheduled action
* \param[in,out] xml Transition graph action XML for \p action
*/
static void
add_node_details(const pcmk_action_t *action, xmlNode *xml)
{
pcmk_node_t *router_node = pcmk__connection_host_for_action(action);
crm_xml_add(xml, PCMK__META_ON_NODE, action->node->details->uname);
crm_xml_add(xml, PCMK__META_ON_NODE_UUID, action->node->details->id);
if (router_node != NULL) {
crm_xml_add(xml, PCMK__XA_ROUTER_NODE, router_node->details->uname);
}
}
/*!
* \internal
* \brief Add resource details to transition graph action XML
*
* \param[in] action Scheduled action
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
add_resource_details(const pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *rsc_xml = NULL;
const char *attr_list[] = {
PCMK_XA_CLASS,
PCMK_XA_PROVIDER,
PCMK_XA_TYPE,
};
/* If a resource is locked to a node via PCMK_OPT_SHUTDOWN_LOCK, mark its
* actions so the controller can preserve the lock when the action
* completes.
*/
if (pcmk__action_locks_rsc_to_node(action)) {
crm_xml_add_ll(action_xml, PCMK_OPT_SHUTDOWN_LOCK,
(long long) action->rsc->lock_time);
}
// List affected resource
rsc_xml = create_xml_node(action_xml,
(const char *) action->rsc->xml->name);
if (pcmk_is_set(action->rsc->flags, pcmk_rsc_removed)
&& (action->rsc->clone_name != NULL)) {
/* Use the numbered instance name here, because if there is more
* than one instance on a node, we need to make sure the command
* goes to the right one.
*
* This is important even for anonymous clones, because the clone's
* unique meta-attribute might have just been toggled from on to
* off.
*/
crm_debug("Using orphan clone name %s instead of %s",
action->rsc->id, action->rsc->clone_name);
crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->clone_name);
crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);
} else if (!pcmk_is_set(action->rsc->flags, pcmk_rsc_unique)) {
const char *xml_id = ID(action->rsc->xml);
crm_debug("Using anonymous clone name %s for %s (aka %s)",
xml_id, action->rsc->id, action->rsc->clone_name);
/* ID is what we'd like client to use
* LONG_ID is what they might know it as instead
*
* LONG_ID is only strictly needed /here/ during the
* transition period until all nodes in the cluster
* are running the new software /and/ have rebooted
* once (meaning that they've only ever spoken to a DC
* supporting this feature).
*
* If anyone toggles the unique flag to 'on', the
* 'instance free' name will correspond to an orphan
* and fall into the clause above instead
*/
crm_xml_add(rsc_xml, PCMK_XA_ID, xml_id);
if ((action->rsc->clone_name != NULL)
&& !pcmk__str_eq(xml_id, action->rsc->clone_name,
pcmk__str_none)) {
crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->clone_name);
} else {
crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);
}
} else {
CRM_ASSERT(action->rsc->clone_name == NULL);
crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->id);
}
for (int lpc = 0; lpc < PCMK__NELEM(attr_list); lpc++) {
crm_xml_add(rsc_xml, attr_list[lpc],
g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
}
}
/*!
* \internal
* \brief Add action attributes to transition graph action XML
*
* \param[in,out] action Scheduled action
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
add_action_attributes(pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *args_xml = NULL;
/* We create free-standing XML to start, so we can sort the attributes
* before adding it to action_xml, which keeps the scheduler regression
* test graphs comparable.
*/
args_xml = create_xml_node(NULL, PCMK__XE_ATTRIBUTES);
crm_xml_add(args_xml, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
g_hash_table_foreach(action->extra, hash2field, args_xml);
if ((action->rsc != NULL) && (action->node != NULL)) {
// Get the resource instance attributes, evaluated properly for node
GHashTable *params = pe_rsc_params(action->rsc, action->node,
action->rsc->cluster);
pcmk__substitute_remote_addr(action->rsc, params);
g_hash_table_foreach(params, hash2smartfield, args_xml);
} else if ((action->rsc != NULL)
&& (action->rsc->variant <= pcmk_rsc_variant_primitive)) {
GHashTable *params = pe_rsc_params(action->rsc, NULL,
action->rsc->cluster);
g_hash_table_foreach(params, hash2smartfield, args_xml);
}
g_hash_table_foreach(action->meta, hash2metafield, args_xml);
if (action->rsc != NULL) {
pcmk_resource_t *parent = action->rsc;
while (parent != NULL) {
parent->cmds->add_graph_meta(parent, args_xml);
parent = parent->parent;
}
pcmk__add_guest_meta_to_xml(args_xml, action);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)
&& (action->node != NULL)) {
/* Pass the node's attributes as meta-attributes.
*
* @TODO: Determine whether it is still necessary to do this. It was
* added in 33d99707, probably for the libfence-based implementation in
* c9a90bd, which is no longer used.
*/
g_hash_table_foreach(action->node->details->attrs, hash2metafield,
args_xml);
}
sorted_xml(args_xml, action_xml, FALSE);
free_xml(args_xml);
}
/*!
* \internal
* \brief Create the transition graph XML for a scheduled action
*
* \param[in,out] parent Parent XML element to add action to
* \param[in,out] action Scheduled action
* \param[in] skip_details If false, add action details as sub-elements
* \param[in] scheduler Scheduler data
*/
static void
create_graph_action(xmlNode *parent, pcmk_action_t *action, bool skip_details,
const pcmk_scheduler_t *scheduler)
{
bool needs_node_info = true;
bool needs_maintenance_info = false;
xmlNode *action_xml = NULL;
if ((action == NULL) || (scheduler == NULL)) {
return;
}
// Create the top-level element based on task
if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) {
/* All fences need node info; guest node fences are pseudo-events */
if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
- action_xml = create_xml_node(parent, XML_GRAPH_TAG_PSEUDO_EVENT);
+ action_xml = create_xml_node(parent, PCMK__XE_PSEUDO_EVENT);
} else {
action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
}
} else if (pcmk__str_any_of(action->task,
PCMK_ACTION_DO_SHUTDOWN,
PCMK_ACTION_CLEAR_FAILCOUNT, NULL)) {
action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
} else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE,
pcmk__str_none)) {
// CIB-only clean-up for shutdown locks
action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
crm_xml_add(action_xml, PCMK__XA_MODE, PCMK__VALUE_CIB);
} else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
if (pcmk__str_eq(action->task, PCMK_ACTION_MAINTENANCE_NODES,
pcmk__str_none)) {
needs_maintenance_info = true;
}
- action_xml = create_xml_node(parent, XML_GRAPH_TAG_PSEUDO_EVENT);
+ action_xml = create_xml_node(parent, PCMK__XE_PSEUDO_EVENT);
needs_node_info = false;
} else {
action_xml = create_xml_node(parent, PCMK__XE_RSC_OP);
}
crm_xml_add_int(action_xml, PCMK_XA_ID, action->id);
crm_xml_add(action_xml, PCMK_XA_OPERATION, action->task);
if ((action->rsc != NULL) && (action->rsc->clone_name != NULL)) {
char *clone_key = NULL;
guint interval_ms;
if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0,
&interval_ms) != pcmk_rc_ok) {
interval_ms = 0;
}
clone_key = clone_op_key(action, interval_ms);
crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, clone_key);
crm_xml_add(action_xml, "internal_" PCMK__XA_OPERATION_KEY,
action->uuid);
free(clone_key);
} else {
crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, action->uuid);
}
if (needs_node_info && (action->node != NULL)) {
add_node_details(action, action_xml);
g_hash_table_insert(action->meta, strdup(PCMK__META_ON_NODE),
strdup(action->node->details->uname));
g_hash_table_insert(action->meta, strdup(PCMK__META_ON_NODE_UUID),
strdup(action->node->details->id));
}
if (skip_details) {
return;
}
if ((action->rsc != NULL)
&& !pcmk_is_set(action->flags, pcmk_action_pseudo)) {
// This is a real resource action, so add resource details
add_resource_details(action, action_xml);
}
/* List any attributes in effect */
add_action_attributes(action, action_xml);
/* List any nodes this action is expected to make down */
if (needs_node_info && (action->node != NULL)) {
add_downed_nodes(action_xml, action);
}
if (needs_maintenance_info) {
add_maintenance_nodes(action_xml, scheduler);
}
}
/*!
* \internal
* \brief Check whether an action should be added to the transition graph
*
* \param[in] action Action to check
*
* \return true if action should be added to graph, otherwise false
*/
static bool
should_add_action_to_graph(const pcmk_action_t *action)
{
if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring action %s (%d): unrunnable",
action->uuid, action->id);
return false;
}
if (pcmk_is_set(action->flags, pcmk_action_optional)
&& !pcmk_is_set(action->flags, pcmk_action_always_in_graph)) {
crm_trace("Ignoring action %s (%d): optional",
action->uuid, action->id);
return false;
}
/* Actions for unmanaged resources should be excluded from the graph,
* with the exception of monitors and cancellation of recurring monitors.
*/
if ((action->rsc != NULL)
&& !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& !pcmk__str_eq(action->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
const char *interval_ms_s;
/* A cancellation of a recurring monitor will get here because the task
* is cancel rather than monitor, but the interval can still be used to
* recognize it. The interval has been normalized to milliseconds by
* this point, so a string comparison is sufficient.
*/
interval_ms_s = g_hash_table_lookup(action->meta, PCMK_META_INTERVAL);
if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) {
crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)",
action->uuid, action->id, action->rsc->id);
return false;
}
}
/* Always add pseudo-actions, fence actions, and shutdown actions (already
* determined to be required and runnable by this point)
*/
if (pcmk_is_set(action->flags, pcmk_action_pseudo)
|| pcmk__strcase_any_of(action->task, PCMK_ACTION_STONITH,
PCMK_ACTION_DO_SHUTDOWN, NULL)) {
return true;
}
if (action->node == NULL) {
pcmk__sched_err("Skipping action %s (%d) "
"because it was not assigned to a node (bug?)",
action->uuid, action->id);
pcmk__log_action("Unassigned", action, false);
return false;
}
if (pcmk_is_set(action->flags, pcmk_action_on_dc)) {
crm_trace("Action %s (%d) should be dumped: "
"can run on DC instead of %s",
action->uuid, action->id, pcmk__node_name(action->node));
} else if (pe__is_guest_node(action->node)
&& !action->node->details->remote_requires_reset) {
crm_trace("Action %s (%d) should be dumped: "
"assuming will be runnable on guest %s",
action->uuid, action->id, pcmk__node_name(action->node));
} else if (!action->node->details->online) {
pcmk__sched_err("Skipping action %s (%d) "
"because it was scheduled for offline node (bug?)",
action->uuid, action->id);
pcmk__log_action("Offline node", action, false);
return false;
} else if (action->node->details->unclean) {
pcmk__sched_err("Skipping action %s (%d) "
"because it was scheduled for unclean node (bug?)",
action->uuid, action->id);
pcmk__log_action("Unclean node", action, false);
return false;
}
return true;
}
/*!
* \internal
* \brief Check whether an ordering's flags can change an action
*
* \param[in] ordering Ordering to check
*
* \return true if ordering has flags that can change an action, false otherwise
*/
static bool
ordering_can_change_actions(const pcmk__related_action_t *ordering)
{
return pcmk_any_flags_set(ordering->type,
~(pcmk__ar_then_implies_first_graphed
|pcmk__ar_first_implies_then_graphed
|pcmk__ar_ordered));
}
/*!
* \internal
* \brief Check whether an action input should be in the transition graph
*
* \param[in] action Action to check
* \param[in,out] input Action input to check
*
* \return true if input should be in graph, false otherwise
* \note This function may not only check an input, but also disable it under
* certain circumstances (load or anti-colocation orderings that are not needed).
*/
static bool
should_add_input_to_graph(const pcmk_action_t *action,
pcmk__related_action_t *input)
{
if (input->state == pe_link_dumped) {
return true;
}
if ((uint32_t) input->type == pcmk__ar_none) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"ordering disabled",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
&& !ordering_can_change_actions(input)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional and input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
&& pcmk_is_set(input->type, pcmk__ar_min_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"minimum number of instances required but input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (pcmk_is_set(input->type, pcmk__ar_unmigratable_then_blocks)
&& !pcmk_is_set(input->action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"input blocked if 'then' unmigratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (pcmk_is_set(input->type, pcmk__ar_if_first_unmigratable)
&& pcmk_is_set(input->action->flags, pcmk_action_migratable)) {
crm_trace("Ignoring %s (%d) input %s (%d): ordering applies "
"only if input is unmigratable, but it is migratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if (((uint32_t) input->type == pcmk__ar_ordered)
&& pcmk_is_set(input->action->flags, pcmk_action_migratable)
&& pcmk__ends_with(input->action->uuid, "_stop_0")) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional but stop in migration",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
} else if ((uint32_t) input->type == pcmk__ar_if_on_same_node_or_target) {
pcmk_node_t *input_node = input->action->node;
if ((action->rsc != NULL)
&& pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
pcmk_node_t *assigned = action->rsc->allocated_to;
/* For load_stopped -> migrate_to orderings, we care about where
* the resource has been assigned, not where migrate_to will be
* executed.
*/
if (!pcmk__same_node(input_node, assigned)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"migration target %s is not same as input node %s",
action->uuid, action->id,
input->action->uuid, input->action->id,
(assigned? assigned->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if (!pcmk__same_node(input_node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
(action->node? action->node->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
} else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"ordering optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if ((uint32_t) input->type == pcmk__ar_if_required_on_same_node) {
if (input->action->node && action->node
&& !pcmk__same_node(input->action->node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
pcmk__node_name(action->node),
pcmk__node_name(input->action->node));
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
} else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if (input->action->rsc
&& input->action->rsc != action->rsc
&& pcmk_is_set(input->action->rsc->flags, pcmk_rsc_failed)
&& !pcmk_is_set(input->action->rsc->flags, pcmk_rsc_managed)
&& pcmk__ends_with(input->action->uuid, "_stop_0")
&& action->rsc && pe_rsc_is_clone(action->rsc)) {
crm_warn("Ignoring requirement that %s complete before %s:"
" unmanaged failed resources cannot prevent clone shutdown",
input->action->uuid, action->uuid);
return false;
} else if (pcmk_is_set(input->action->flags, pcmk_action_optional)
&& !pcmk_any_flags_set(input->action->flags,
pcmk_action_always_in_graph
|pcmk_action_added_to_graph)
&& !should_add_action_to_graph(input->action)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"input optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
}
crm_trace("%s (%d) input %s %s (%d) on %s should be dumped: %s %s %#.6x",
action->uuid, action->id, action_type_str(input->action->flags),
input->action->uuid, input->action->id,
action_node_str(input->action),
action_runnable_str(input->action->flags),
action_optional_str(input->action->flags), input->type);
return true;
}
/*!
* \internal
* \brief Check whether an ordering creates an ordering loop
*
* \param[in] init_action "First" action in ordering
* \param[in] action Callers should always set this the same as
* \p init_action (this function may use a different
* value for recursive calls)
* \param[in,out] input Action wrapper for "then" action in ordering
*
* \return true if the ordering creates a loop, otherwise false
*/
bool
pcmk__graph_has_loop(const pcmk_action_t *init_action,
const pcmk_action_t *action, pcmk__related_action_t *input)
{
bool has_loop = false;
if (pcmk_is_set(input->action->flags, pcmk_action_detect_loop)) {
crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
action->uuid,
action->node? action->node->details->uname : "",
input->type);
return false;
}
// Don't need to check inputs that won't be used
if (!should_add_input_to_graph(action, input)) {
return false;
}
if (input->action == init_action) {
crm_debug("Input loop found in %s@%s ->...-> %s@%s",
action->uuid,
action->node? action->node->details->uname : "",
init_action->uuid,
init_action->node? init_action->node->details->uname : "");
return true;
}
pcmk__set_action_flags(input->action, pcmk_action_detect_loop);
crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x)"
"for graph loop with %s@%s ",
action->uuid,
action->node? action->node->details->uname : "",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
input->type,
init_action->uuid,
init_action->node? init_action->node->details->uname : "");
// Recursively check input itself for loops
for (GList *iter = input->action->actions_before;
iter != NULL; iter = iter->next) {
if (pcmk__graph_has_loop(init_action, input->action,
(pcmk__related_action_t *) iter->data)) {
// Recursive call already logged a debug message
has_loop = true;
break;
}
}
pcmk__clear_action_flags(input->action, pcmk_action_detect_loop);
if (!has_loop) {
crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
action->uuid,
action->node? action->node->details->uname : "",
input->type);
}
return has_loop;
}
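/* Hedged sketch of the caller side: wrap a prospective "then" action the way
 * action inputs are stored, and ask whether adding it as an input of "first"
 * would close a cycle. Field and enum names mirror how pcmk__related_action_t
 * is used elsewhere in this file; the helper itself is hypothetical.
 */
static bool
ordering_would_create_loop(pcmk_action_t *first, pcmk_action_t *then)
{
    pcmk__related_action_t input = {
        .action = then,
        .type = (enum pe_ordering) pcmk__ar_ordered,
        .state = pe_link_not_dumped,
    };

    return pcmk__graph_has_loop(first, first, &input);
}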
/*!
* \internal
* \brief Create a synapse XML element for a transition graph
*
* \param[in] action Action that synapse is for
* \param[in,out] scheduler Scheduler data containing graph
*
* \return Newly added XML element for new graph synapse
*/
static xmlNode *
create_graph_synapse(const pcmk_action_t *action, pcmk_scheduler_t *scheduler)
{
int synapse_priority = 0;
xmlNode *syn = create_xml_node(scheduler->graph, "synapse");
crm_xml_add_int(syn, PCMK_XA_ID, scheduler->num_synapse);
scheduler->num_synapse++;
if (action->rsc != NULL) {
synapse_priority = action->rsc->priority;
}
if (action->priority > synapse_priority) {
synapse_priority = action->priority;
}
if (synapse_priority > 0) {
crm_xml_add_int(syn, PCMK__XA_PRIORITY, synapse_priority);
}
return syn;
}
/*!
* \internal
* \brief Add an action to the transition graph XML if appropriate
*
* \param[in,out] data Action to possibly add
* \param[in,out] user_data Scheduler data
*
* \note This will de-duplicate the action inputs, meaning that the
* pcmk__related_action_t:type flags can no longer be relied on to retain
* their original settings. That means this MUST be called after
* pcmk__apply_orderings() is complete, and nothing after this should rely
* on those type flags. (For example, some code looks for type equal to
* some flag rather than whether the flag is set, and some code looks for
* particular combinations of flags -- such code must be done before
* pcmk__create_graph().)
*/
static void
add_action_to_graph(gpointer data, gpointer user_data)
{
pcmk_action_t *action = (pcmk_action_t *) data;
pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) user_data;
xmlNode *syn = NULL;
xmlNode *set = NULL;
xmlNode *in = NULL;
/* If we haven't already, de-duplicate inputs (even if we won't be adding
* the action to the graph, so that crm_simulate's dot graphs don't have
* duplicates).
*/
if (!pcmk_is_set(action->flags, pcmk_action_inputs_deduplicated)) {
pcmk__deduplicate_action_inputs(action);
pcmk__set_action_flags(action, pcmk_action_inputs_deduplicated);
}
if (pcmk_is_set(action->flags, pcmk_action_added_to_graph)
|| !should_add_action_to_graph(action)) {
return; // Already added, or shouldn't be
}
pcmk__set_action_flags(action, pcmk_action_added_to_graph);
crm_trace("Adding action %d (%s%s%s) to graph",
action->id, action->uuid,
((action->node == NULL)? "" : " on "),
((action->node == NULL)? "" : action->node->details->uname));
syn = create_graph_synapse(action, scheduler);
set = create_xml_node(syn, "action_set");
in = create_xml_node(syn, "inputs");
create_graph_action(set, action, false, scheduler);
for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *input = lpc->data;
if (should_add_input_to_graph(action, input)) {
xmlNode *input_xml = create_xml_node(in, "trigger");
input->state = pe_link_dumped;
create_graph_action(input_xml, input->action, true, scheduler);
}
}
}
static int transition_id = -1;
/*!
* \internal
* \brief Log a message after calculating a transition
*
* \param[in] filename Where transition input is stored
*/
void
pcmk__log_transition_summary(const char *filename)
{
if (was_processing_error || crm_config_error) {
crm_err("Calculated transition %d (with errors)%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
} else if (was_processing_warning || crm_config_warning) {
crm_warn("Calculated transition %d (with warnings)%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
} else {
crm_notice("Calculated transition %d%s%s",
transition_id,
(filename == NULL)? "" : ", saving inputs in ",
(filename == NULL)? "" : filename);
}
if (crm_config_error) {
crm_notice("Configuration errors found during scheduler processing,"
" please run \"crm_verify -L\" to identify issues");
}
}
/*!
* \internal
* \brief Add a resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void
pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc)
{
GList *iter = NULL;
CRM_ASSERT(rsc != NULL);
pcmk__rsc_trace(rsc, "Adding actions for %s to graph", rsc->id);
// First add the resource's own actions
g_list_foreach(rsc->actions, add_action_to_graph, rsc->cluster);
// Then recursively add its children's actions (appropriate to variant)
for (iter = rsc->children; iter != NULL; iter = iter->next) {
pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
child_rsc->cmds->add_actions_to_graph(child_rsc);
}
}
/*!
* \internal
* \brief Create a transition graph with all cluster actions needed
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__create_graph(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
const char *value = NULL;
long long limit = 0LL;
transition_id++;
crm_trace("Creating transition graph %d", transition_id);
scheduler->graph = create_xml_node(NULL, PCMK__XE_TRANSITION_GRAPH);
value = pe_pref(scheduler->config_hash, PCMK_OPT_CLUSTER_DELAY);
crm_xml_add(scheduler->graph, PCMK_OPT_CLUSTER_DELAY, value);
value = pe_pref(scheduler->config_hash, PCMK_OPT_STONITH_TIMEOUT);
crm_xml_add(scheduler->graph, PCMK_OPT_STONITH_TIMEOUT, value);
crm_xml_add(scheduler->graph, "failed-stop-offset", "INFINITY");
if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
crm_xml_add(scheduler->graph, "failed-start-offset", "INFINITY");
} else {
crm_xml_add(scheduler->graph, "failed-start-offset", "1");
}
value = pe_pref(scheduler->config_hash, PCMK_OPT_BATCH_LIMIT);
crm_xml_add(scheduler->graph, PCMK_OPT_BATCH_LIMIT, value);
crm_xml_add_int(scheduler->graph, "transition_id", transition_id);
value = pe_pref(scheduler->config_hash, PCMK_OPT_MIGRATION_LIMIT);
if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
crm_xml_add(scheduler->graph, PCMK_OPT_MIGRATION_LIMIT, value);
}
if (scheduler->recheck_by > 0) {
char *recheck_epoch = NULL;
recheck_epoch = crm_strdup_printf("%llu",
(long long) scheduler->recheck_by);
crm_xml_add(scheduler->graph, "recheck-by", recheck_epoch);
free(recheck_epoch);
}
/* The following code will de-duplicate action inputs, so nothing past this
* should rely on the action input type flags retaining their original
* values.
*/
// Add resource actions to graph
for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
pcmk__rsc_trace(rsc, "Processing actions for %s", rsc->id);
rsc->cmds->add_actions_to_graph(rsc);
}
// Add pseudo-action for list of nodes with maintenance state update
add_maintenance_update(scheduler);
// Add non-resource (node) actions
for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
pcmk_action_t *action = (pcmk_action_t *) iter->data;
if ((action->rsc != NULL)
&& (action->node != NULL)
&& action->node->details->shutdown
&& !pcmk_is_set(action->rsc->flags, pcmk_rsc_maintenance)
&& !pcmk_any_flags_set(action->flags,
pcmk_action_optional|pcmk_action_runnable)
&& pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* Eventually we should just ignore the 'fence' case, but for now
* it's the best way to detect (in CTS) when CIB resource updates
* are being lost.
*/
if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
|| (scheduler->no_quorum_policy == pcmk_no_quorum_ignore)) {
const bool managed = pcmk_is_set(action->rsc->flags,
pcmk_rsc_managed);
const bool failed = pcmk_is_set(action->rsc->flags,
pcmk_rsc_failed);
crm_crit("Cannot %s %s because of %s:%s%s (%s)",
action->node->details->unclean? "fence" : "shut down",
pcmk__node_name(action->node), action->rsc->id,
(managed? " blocked" : " unmanaged"),
(failed? " failed" : ""), action->uuid);
}
}
add_action_to_graph((gpointer) action, (gpointer) scheduler);
}
crm_log_xml_trace(scheduler->graph, "graph");
}
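/* Illustrative sketch of the producer/consumer round trip, assuming a fully
 * populated scheduler: emit the graph XML for the scheduled actions, then
 * unpack it with the consumer-side helper shown earlier in this diff. The
 * helper name is hypothetical.
 */
static void
produce_and_unpack(pcmk_scheduler_t *scheduler)
{
    pcmk__create_graph(scheduler);
    if (scheduler->graph != NULL) {
        pcmk__graph_t *graph = pcmk__unpack_graph(scheduler->graph, "sketch");

        if (graph != NULL) {
            crm_info("Produced %d actions in %d synapses",
                     graph->num_actions, graph->num_synapses);
            pcmk__free_graph(graph);
        }
    }
}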
