
diff --git a/daemons/controld/controld_join_client.c b/daemons/controld/controld_join_client.c
index da6a9d6969..799d1b4eac 100644
--- a/daemons/controld/controld_join_client.c
+++ b/daemons/controld/controld_join_client.c
@@ -1,366 +1,369 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <pacemaker-controld.h>
void join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data);
extern ha_msg_input_t *copy_ha_msg_input(ha_msg_input_t * orig);
/*!
* \internal
* \brief Remember if DC is shutting down as we join
*
* If we're joining while the current DC is shutting down, update its expected
* state, so we don't fence it if we become the new DC. (We weren't a peer
* when it broadcast its shutdown request.)
*
* \param[in] msg A join message from the DC
*/
static void
update_dc_expected(const xmlNode *msg)
{
if ((controld_globals.dc_name != NULL)
&& pcmk__xe_attr_is_true(msg, F_CRM_DC_LEAVING)) {
crm_node_t *dc_node = crm_get_peer(0, controld_globals.dc_name);
pcmk__update_peer_expected(__func__, dc_node, CRMD_JOINSTATE_DOWN);
}
}
/* A_CL_JOIN_QUERY */
/* is there a DC out there? */
void
do_cl_join_query(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
xmlNode *req = create_request(CRM_OP_JOIN_ANNOUNCE, NULL, NULL,
CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL);
sleep(1); // Give the cluster layer time to propagate to the DC
update_dc(NULL); /* Unset any existing value so that the result is not discarded */
crm_debug("Querying for a DC");
send_cluster_message(NULL, crm_msg_crmd, req, FALSE);
free_xml(req);
}
/* A_CL_JOIN_ANNOUNCE */
/* this is kind of a workaround for the fact that we may not be around or
* are otherwise unable to reply when the DC sends out A_DC_JOIN_OFFER_ALL
*/
void
do_cl_join_announce(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
/* don't announce if we're in one of these states */
if (cur_state != S_PENDING) {
crm_warn("Not announcing cluster join because in state %s",
fsa_state2string(cur_state));
return;
}
if (!pcmk_is_set(controld_globals.fsa_input_register, R_STARTING)) {
/* send as a broadcast */
xmlNode *req = create_request(CRM_OP_JOIN_ANNOUNCE, NULL, NULL,
CRM_SYSTEM_DC, CRM_SYSTEM_CRMD, NULL);
crm_debug("Announcing availability");
update_dc(NULL);
send_cluster_message(NULL, crm_msg_crmd, req, FALSE);
free_xml(req);
} else {
/* Delay announce until we have finished local startup */
crm_warn("Delaying announce of cluster join until local startup is complete");
return;
}
}
static int query_call_id = 0;
/* A_CL_JOIN_REQUEST */
/* aka. accept the welcome offer */
void
do_cl_join_offer_respond(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
cib_t *cib_conn = controld_globals.cib_conn;
ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
const char *welcome_from;
const char *join_id;
CRM_CHECK(input != NULL, return);
#if 0
if (we are sick) {
log error;
/* save the request for later? */
return;
}
#endif
welcome_from = crm_element_value(input->msg, F_CRM_HOST_FROM);
join_id = crm_element_value(input->msg, F_CRM_JOIN_ID);
crm_trace("Accepting cluster join offer from node %s "CRM_XS" join-%s",
welcome_from, crm_element_value(input->msg, F_CRM_JOIN_ID));
/* we only ever want the last one */
if (query_call_id > 0) {
crm_trace("Cancelling previous join query: %d", query_call_id);
remove_cib_op_callback(query_call_id, FALSE);
query_call_id = 0;
}
if (update_dc(input->msg) == FALSE) {
crm_warn("Discarding cluster join offer from node %s (expected %s)",
welcome_from, controld_globals.dc_name);
return;
}
update_dc_expected(input->msg);
query_call_id = cib_conn->cmds->query(cib_conn, NULL, NULL,
cib_scope_local|cib_no_children);
fsa_register_cib_callback(query_call_id, strdup(join_id),
join_query_callback);
crm_trace("Registered join query callback: %d", query_call_id);
controld_set_fsa_action_flags(A_DC_TIMER_STOP);
controld_trigger_fsa();
}
void
join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
char *join_id = user_data;
xmlNode *generation = create_xml_node(NULL, XML_CIB_TAG_GENERATION_TUPPLE);
CRM_LOG_ASSERT(join_id != NULL);
if (query_call_id != call_id) {
crm_trace("Query %d superseded", call_id);
goto done;
}
query_call_id = 0;
if(rc != pcmk_ok || output == NULL) {
crm_err("Could not retrieve version details for join-%s: %s (%d)",
join_id, pcmk_strerror(rc), rc);
register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__);
} else if (controld_globals.dc_name == NULL) {
crm_debug("Membership is in flux, not continuing join-%s", join_id);
} else {
xmlNode *reply = NULL;
crm_debug("Respond to join offer join-%s from %s",
join_id, controld_globals.dc_name);
copy_in_properties(generation, output);
reply = create_request(CRM_OP_JOIN_REQUEST, generation,
controld_globals.dc_name, CRM_SYSTEM_DC,
CRM_SYSTEM_CRMD, NULL);
crm_xml_add(reply, F_CRM_JOIN_ID, join_id);
crm_xml_add(reply, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
send_cluster_message(crm_get_peer(0, controld_globals.dc_name),
crm_msg_crmd, reply, TRUE);
free_xml(reply);
}
done:
free_xml(generation);
}
-static void
-set_join_state(const char * start_state)
+void
+set_join_state(const char *start_state, const char *node_name, const char *node_uuid,
+ bool remote)
{
if (pcmk__str_eq(start_state, "standby", pcmk__str_casei)) {
crm_notice("Forcing node %s to join in %s state per configured "
- "environment", controld_globals.our_nodename, start_state);
+ "environment", node_name, start_state);
cib__update_node_attr(controld_globals.logger_out,
controld_globals.cib_conn, cib_sync_call,
- XML_CIB_TAG_NODES, controld_globals.our_uuid,
- NULL, NULL, NULL, "standby", "on", NULL, NULL);
+ XML_CIB_TAG_NODES, node_uuid,
+ NULL, NULL, NULL, "standby", "on", NULL,
+ remote ? "remote" : NULL);
} else if (pcmk__str_eq(start_state, "online", pcmk__str_casei)) {
crm_notice("Forcing node %s to join in %s state per configured "
- "environment", controld_globals.our_nodename, start_state);
+ "environment", node_name, start_state);
cib__update_node_attr(controld_globals.logger_out,
controld_globals.cib_conn, cib_sync_call,
- XML_CIB_TAG_NODES, controld_globals.our_uuid,
- NULL, NULL, NULL, "standby", "off", NULL, NULL);
+ XML_CIB_TAG_NODES, node_uuid,
+ NULL, NULL, NULL, "standby", "off", NULL,
+ remote ? "remote" : NULL);
} else if (pcmk__str_eq(start_state, "default", pcmk__str_casei)) {
- crm_debug("Not forcing a starting state on node %s",
- controld_globals.our_nodename);
+ crm_debug("Not forcing a starting state on node %s", node_name);
} else {
crm_warn("Unrecognized start state '%s', using 'default' (%s)",
- start_state, controld_globals.our_nodename);
+ start_state, node_name);
}
}
static int
update_conn_host_cache(xmlNode *node, void *userdata)
{
const char *remote = crm_element_value(node, XML_ATTR_ID);
const char *conn_host = crm_element_value(node, PCMK__XA_CONN_HOST);
const char *state = crm_element_value(node, XML_CIB_TAG_STATE);
crm_node_t *remote_peer = crm_remote_peer_get(remote);
if (remote_peer == NULL) {
return pcmk_rc_ok;
}
if (conn_host != NULL) {
pcmk__str_update(&remote_peer->conn_host, conn_host);
}
if (state != NULL) {
pcmk__update_peer_state(__func__, remote_peer, state, 0);
}
return pcmk_rc_ok;
}
/* A_CL_JOIN_RESULT */
/* aka. this is notification that we have (or have not) been accepted */
void
do_cl_join_finalize_respond(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
xmlNode *tmp1 = NULL;
gboolean was_nack = TRUE;
static gboolean first_join = TRUE;
ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
const char *start_state = pcmk__env_option(PCMK__ENV_NODE_START_STATE);
int join_id = -1;
const char *op = crm_element_value(input->msg, F_CRM_TASK);
const char *welcome_from = crm_element_value(input->msg, F_CRM_HOST_FROM);
if (!pcmk__str_eq(op, CRM_OP_JOIN_ACKNAK, pcmk__str_casei)) {
crm_trace("Ignoring op=%s message", op);
return;
}
/* calculate if it was an ack or a nack */
if (pcmk__xe_attr_is_true(input->msg, CRM_OP_JOIN_ACKNAK)) {
was_nack = FALSE;
}
crm_element_value_int(input->msg, F_CRM_JOIN_ID, &join_id);
if (was_nack) {
crm_err("Shutting down because cluster join with leader %s failed "
CRM_XS" join-%d NACK'd", welcome_from, join_id);
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
controld_set_fsa_input_flags(R_STAYDOWN);
return;
}
if (!AM_I_DC
&& pcmk__str_eq(welcome_from, controld_globals.our_nodename,
pcmk__str_casei)) {
crm_warn("Discarding our own welcome - we're no longer the DC");
return;
}
if (update_dc(input->msg) == FALSE) {
crm_warn("Discarding %s from node %s (expected from %s)",
op, welcome_from, controld_globals.dc_name);
return;
}
update_dc_expected(input->msg);
/* record the node's feature set as a transient attribute */
update_attrd(controld_globals.our_nodename, CRM_ATTR_FEATURE_SET,
CRM_FEATURE_SET, NULL, FALSE);
/* send our status section to the DC */
tmp1 = controld_query_executor_state();
if (tmp1 != NULL) {
xmlNode *remotes = NULL;
xmlNode *reply = create_request(CRM_OP_JOIN_CONFIRM, tmp1,
controld_globals.dc_name, CRM_SYSTEM_DC,
CRM_SYSTEM_CRMD, NULL);
crm_xml_add_int(reply, F_CRM_JOIN_ID, join_id);
crm_debug("Confirming join-%d: sending local operation history to %s",
join_id, controld_globals.dc_name);
/*
* If this is the node's first join since the controller started on it,
* set its initial state (standby or member) according to the user's
* preference.
*
* We do not clear the LRM history here. Even if the DC failed to do so
* when we last left, removing it here would create a race condition if
* the controller is being recovered: instead of a list of active
* resources from the executor, we may end up with a blank status
* section. If we are _NOT_ lucky, we will probe for the "wrong" instance
* of anonymous clones and end up with multiple active instances on the
* machine.
*/
if (first_join
&& !pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
first_join = FALSE;
if (start_state) {
- set_join_state(start_state);
+ set_join_state(start_state, controld_globals.our_nodename,
+ controld_globals.our_uuid, false);
}
}
send_cluster_message(crm_get_peer(0, controld_globals.dc_name),
crm_msg_crmd, reply, TRUE);
free_xml(reply);
if (AM_I_DC == FALSE) {
register_fsa_input_adv(cause, I_NOT_DC, NULL, A_NOTHING, TRUE,
__func__);
}
free_xml(tmp1);
/* Update the remote node cache with information about which node
* is hosting the connection.
*/
remotes = pcmk__xe_match(input->msg, XML_CIB_TAG_NODES, NULL, NULL);
if (remotes != NULL) {
pcmk__xe_foreach_child(remotes, XML_CIB_TAG_NODE, update_conn_host_cache, NULL);
}
} else {
crm_err("Could not confirm join-%d with %s: Local operation history "
"failed", join_id, controld_globals.dc_name);
register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
}
}
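
The change above makes set_join_state() public and parameterizes it by node. It previously had static linkage and always acted on the local node via controld_globals; it now takes an explicit node name, node UUID, and a remote flag. A minimal sketch of the two call sites, using only names that appear in this revision:

    /* Local cluster node (first join, from do_cl_join_finalize_respond()) */
    set_join_state(start_state, controld_globals.our_nodename,
                   controld_globals.our_uuid, false);

    /* Pacemaker Remote node (from remote_node_up() in controld_remote_ra.c) */
    set_join_state(start_state, node->uname, node->uuid, true);

When remote is true, cib__update_node_attr() gets "remote" as its final argument instead of NULL, which presumably tells the CIB code to resolve the node as a Pacemaker Remote node when writing the standby attribute.
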
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index f24b755de1..8ab1e4657e 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1440 +1,1455 @@
/*
* Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
#include <crm/lrmd.h>
#include <crm/lrmd_internal.h>
#include <crm/services.h>
#include <pacemaker-controld.h>
#define REMOTE_LRMD_RA "remote"
/* The max start timeout before cmd retry */
#define MAX_START_TIMEOUT_MS 10000
#define cmd_set_flags(cmd, flags_to_set) do { \
(cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"Remote command", (cmd)->rsc_id, (cmd)->status, \
(flags_to_set), #flags_to_set); \
} while (0)
#define cmd_clear_flags(cmd, flags_to_clear) do { \
(cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"Remote command", (cmd)->rsc_id, (cmd)->status, \
(flags_to_clear), #flags_to_clear); \
} while (0)
enum remote_cmd_status {
cmd_reported_success = (1 << 0),
cmd_cancel = (1 << 1),
};
typedef struct remote_ra_cmd_s {
/*! the local node the cmd is issued from */
char *owner;
/*! the remote node the cmd is executed on */
char *rsc_id;
/*! the action to execute */
char *action;
/*! some string the client wants us to give it back */
char *userdata;
/*! start delay in ms */
int start_delay;
/*! timer id used for start delay. */
int delay_id;
/*! timeout in ms for cmd */
int timeout;
int remaining_timeout;
/*! recurring interval in ms */
guint interval_ms;
/*! interval timer id */
int interval_id;
int monitor_timeout_id;
int takeover_timeout_id;
/*! action parameters */
lrmd_key_value_t *params;
pcmk__action_result_t result;
int call_id;
time_t start_time;
uint32_t status;
} remote_ra_cmd_t;
#define lrm_remote_set_flags(lrm_state, flags_to_set) do { \
lrm_state_t *lrm = (lrm_state); \
remote_ra_data_t *ra = lrm->remote_ra_data; \
ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
lrm->node_name, ra->status, \
(flags_to_set), #flags_to_set); \
} while (0)
#define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \
lrm_state_t *lrm = (lrm_state); \
remote_ra_data_t *ra = lrm->remote_ra_data; \
ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
lrm->node_name, ra->status, \
(flags_to_clear), #flags_to_clear); \
} while (0)
enum remote_status {
expect_takeover = (1 << 0),
takeover_complete = (1 << 1),
remote_active = (1 << 2),
/* Maintenance mode is difficult to determine from the controller's context,
* so we have it signalled back with the transition from the scheduler.
*/
remote_in_maint = (1 << 3),
/* Similar for whether we are controlling a guest node or a remote node.
* Fortunately there is already a meta-attribute in the transition, and
* since the situation doesn't change over time, we can note the
* information down at resource start for later use when the attributes
* aren't at hand.
*/
controlling_guest = (1 << 4),
};
typedef struct remote_ra_data_s {
crm_trigger_t *work;
remote_ra_cmd_t *cur_cmd;
GList *cmds;
GList *recurring_cmds;
uint32_t status;
} remote_ra_data_t;
static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
static GList *fail_all_monitor_cmds(GList * list);
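/*!
* \internal
* \brief Free a remote RA command, cancelling any timers it still holds
*
* \param[in,out] user_data Command to free
*/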
static void
free_cmd(gpointer user_data)
{
remote_ra_cmd_t *cmd = user_data;
if (!cmd) {
return;
}
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
}
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
}
if (cmd->takeover_timeout_id) {
g_source_remove(cmd->takeover_timeout_id);
}
free(cmd->owner);
free(cmd->rsc_id);
free(cmd->action);
free(cmd->userdata);
pcmk__reset_result(&(cmd->result));
lrmd_key_value_freeall(cmd->params);
free(cmd);
}
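/*!
* \internal
* \brief Generate a call ID for a remote RA command
*
* \return Next value of a static counter, wrapping back to 1 on overflow
*/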
static int
generate_callid(void)
{
static int remote_ra_callid = 0;
remote_ra_callid++;
if (remote_ra_callid <= 0) {
remote_ra_callid = 1;
}
return remote_ra_callid;
}
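/*!
* \internal
* \brief Timer callback to requeue a recurring command when its interval pops
*
* Moves the command from the recurring list back to the regular command
* queue and triggers the work item that executes queued commands.
*
* \param[in,out] data Recurring command to requeue
*
* \return FALSE (so the timer fires only once per scheduling)
*/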
static gboolean
recurring_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->interval_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
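/*!
* \internal
* \brief Timer callback for when a command's start delay expires
*
* Re-triggers the work queue so the delayed command can now be executed.
*
* \param[in,out] data Command whose start delay expired
*
* \return FALSE (one-shot timer)
*/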
static gboolean
start_delay_helper(gpointer data)
{
remote_ra_cmd_t *cmd = data;
lrm_state_t *connection_rsc = NULL;
cmd->delay_id = 0;
connection_rsc = lrm_state_find(cmd->rsc_id);
if (connection_rsc && connection_rsc->remote_ra_data) {
remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
mainloop_set_trigger(ra_data->work);
}
return FALSE;
}
static bool
should_purge_attributes(crm_node_t *node)
{
bool purge = true;
crm_node_t *conn_node = NULL;
lrm_state_t *connection_rsc = NULL;
if (!node->conn_host) {
return purge;
}
/* Get the node that was hosting the remote connection resource from the
* peer cache. That's the one we really care about here.
*/
conn_node = crm_get_peer(0, node->conn_host);
if (conn_node == NULL) {
return purge;
}
/* Check the uptime of connection_rsc. If it hasn't been running long
* enough, set purge=true. "Long enough" means it started running earlier
* than the timestamp when we noticed it went away in the first place.
*/
connection_rsc = lrm_state_find(node->uname);
if (connection_rsc != NULL) {
lrmd_t *lrm = connection_rsc->conn;
time_t uptime = lrmd__uptime(lrm);
time_t now = time(NULL);
/* Add 20s of fuzziness to give corosync a while to notice the remote
* host is gone. On various error conditions (failure to get uptime,
* peer_lost isn't set) we default to purging.
*/
if (uptime > 0 &&
conn_node->peer_lost > 0 &&
uptime + 20 >= now - conn_node->peer_lost) {
purge = false;
}
}
return purge;
}
static enum controld_section_e
section_to_delete(bool purge)
{
if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
if (purge) {
return controld_section_all_unlocked;
} else {
return controld_section_lrm_unlocked;
}
} else {
if (purge) {
return controld_section_all;
} else {
return controld_section_lrm;
}
}
}
static void
purge_remote_node_attrs(int call_opt, crm_node_t *node)
{
bool purge = should_purge_attributes(node);
enum controld_section_e section = section_to_delete(purge);
/* Purge node from attrd's memory */
if (purge) {
update_attrd_remote_node_removed(node->uname, NULL);
}
controld_delete_node_state(node->uname, section, call_opt);
}
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node joining
*
* \param[in] node_name Name of newly integrated pacemaker_remote node
*/
static void
remote_node_up(const char *node_name)
{
int call_opt;
xmlNode *update, *state;
crm_node_t *node;
+ lrm_state_t *connection_rsc = NULL;
CRM_CHECK(node_name != NULL, return);
crm_info("Announcing Pacemaker Remote node %s", node_name);
call_opt = crmd_cib_smart_opt();
/* Delete node's probe_complete attribute. This serves two purposes:
*
* - @COMPAT DCs < 1.1.14 in a rolling upgrade might use it
* - deleting it (or any attribute for that matter) here ensures the
* attribute manager learns the node is remote
*/
update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
/* Ensure node is in the remote peer cache with member status */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
purge_remote_node_attrs(call_opt, node);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
+ /* Apply any start state that we were given from the environment on the
+ * remote node.
+ */
+ connection_rsc = lrm_state_find(node->uname);
+
+ if (connection_rsc != NULL) {
+ lrmd_t *lrm = connection_rsc->conn;
+ const char *start_state = lrmd__node_start_state(lrm);
+
+ if (start_state) {
+ set_join_state(start_state, node->uname, node->uuid, true);
+ }
+ }
+
/* pacemaker_remote nodes don't participate in the membership layer,
* so cluster nodes don't automatically get notified when they come and go.
* We send a cluster message to the DC, and update the CIB node state entry,
* so the DC will get it sooner (via message) or later (via CIB refresh),
* and any other interested parties can query the CIB.
*/
broadcast_remote_state_message(node_name, true);
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
state = create_node_state_update(node, node_update_cluster, update,
__func__);
/* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever
* needs to be fenced, this flag will allow various actions to determine
* whether the fencing has happened yet.
*/
crm_xml_add(state, XML_NODE_IS_FENCED, "0");
/* TODO: If the remote connection drops, and this (async) CIB update either
* failed or has not yet completed, later actions could mistakenly think the
* node has already been fenced (if the XML_NODE_IS_FENCED attribute was
* previously set, because it won't have been cleared). This could prevent
* actual fencing or allow recurring monitor failures to be cleared too
* soon. Ideally, we wouldn't rely on the CIB for the fenced status.
*/
controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL);
free_xml(update);
}
enum down_opts {
DOWN_KEEP_LRM,
DOWN_ERASE_LRM
};
/*!
* \internal
* \brief Handle cluster communication related to pacemaker_remote node leaving
*
* \param[in] node_name Name of lost node
* \param[in] opts Whether to keep or erase LRM history
*/
static void
remote_node_down(const char *node_name, const enum down_opts opts)
{
xmlNode *update;
int call_opt = crmd_cib_smart_opt();
crm_node_t *node;
/* Purge node from attrd's memory */
update_attrd_remote_node_removed(node_name, NULL);
/* Normally, only node attributes should be erased, and the resource history
* should be kept until the node comes back up. However, after a successful
* fence, we want to clear the history as well, so we don't think resources
* are still running on the node.
*/
if (opts == DOWN_ERASE_LRM) {
controld_delete_node_state(node_name, controld_section_all, call_opt);
} else {
controld_delete_node_state(node_name, controld_section_attrs, call_opt);
}
/* Ensure node is in the remote peer cache with lost state */
node = crm_remote_peer_get(node_name);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
/* Notify DC */
broadcast_remote_state_message(node_name, false);
/* Update CIB node state */
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
create_node_state_update(node, node_update_cluster, update, __func__);
controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL);
free_xml(update);
}
/*!
* \internal
* \brief Handle effects of a remote RA command on node state
*
* \param[in] cmd Completed remote RA command
*/
static void
check_remote_node_state(const remote_ra_cmd_t *cmd)
{
/* Only successful actions can change node state */
if (!pcmk__result_ok(&(cmd->result))) {
return;
}
if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
remote_node_up(cmd->rsc_id);
} else if (pcmk__str_eq(cmd->action, "migrate_from", pcmk__str_casei)) {
/* After a successful migration, we don't need to do remote_node_up()
* because the DC already knows the node is up, and we don't want to
* clear LRM history etc. We do need to add the remote node to this
* host's remote peer cache, because (unless it happens to be DC)
* it hasn't been tracking the remote node, and other code relies on
* the cache to distinguish remote nodes from unseen cluster nodes.
*/
crm_node_t *node = crm_remote_peer_get(cmd->rsc_id);
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
} else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
if (ra_data) {
if (!pcmk_is_set(ra_data->status, takeover_complete)) {
/* Stop means down if we didn't successfully migrate elsewhere */
remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
} else if (AM_I_DC == FALSE) {
/* Only the connection host and DC track node state,
* so if the connection migrated elsewhere and we aren't DC,
* un-cache the node, so we don't have stale info
*/
crm_remote_peer_cache_remove(cmd->rsc_id);
}
}
}
/* We don't do anything for successful monitors, which is correct for
* routine recurring monitors, and for monitors on nodes where the
* connection isn't supposed to be (the cluster will stop the connection in
* that case). However, if the initial probe finds the connection already
* active on the node where we want it, we probably should do
* remote_node_up(). Unfortunately, we can't distinguish that case here.
* Given that connections have to be initiated by the cluster, the chance of
* that should be close to zero.
*/
}
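/*!
* \internal
* \brief Report a remote RA command's result through the executor callback
*
* Synthesizes an lrmd_event_data_t from the command (handling node state
* side effects first) and passes it to lrm_op_callback(), so remote
* connection results follow the same path as ordinary executor results.
*
* \param[in,out] cmd Completed command to report
*/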
static void
report_remote_ra_result(remote_ra_cmd_t * cmd)
{
lrmd_event_data_t op = { 0, };
check_remote_node_state(cmd);
op.type = lrmd_event_exec_complete;
op.rsc_id = cmd->rsc_id;
op.op_type = cmd->action;
op.user_data = cmd->userdata;
op.timeout = cmd->timeout;
op.interval_ms = cmd->interval_ms;
op.t_run = (unsigned int) cmd->start_time;
op.t_rcchange = (unsigned int) cmd->start_time;
lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status,
cmd->result.exit_reason);
if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) {
op.t_rcchange = (unsigned int) time(NULL);
/* This edge case will likely never occur, but if it does, the result is
* that a failure will not be processed correctly. It is only remotely
* possible because we can detect that a connection resource's TCP
* connection has failed at any moment after start has completed. The
* actual recurring operation is just a connectivity ping.
*
* Basically, we are not guaranteed that a first successful monitor op and
* a subsequent failed monitor op will not occur in the same timestamp,
* but we have to make it look like the operations occurred at separate
* times. */
if (op.t_rcchange == op.t_run) {
op.t_rcchange++;
}
}
if (cmd->params) {
lrmd_key_value_t *tmp;
op.params = pcmk__strkey_table(free, free);
for (tmp = cmd->params; tmp; tmp = tmp->next) {
g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value));
}
}
op.call_id = cmd->call_id;
op.remote_nodename = cmd->owner;
lrm_op_callback(&op);
if (op.params) {
g_hash_table_destroy(op.params);
}
lrmd__reset_result(&op);
}
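/*!
* \internal
* \brief Recalculate how much of a command's timeout remains (in ms)
*
* \param[in,out] cmd Command whose remaining_timeout should be updated
*/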
static void
update_remaining_timeout(remote_ra_cmd_t * cmd)
{
cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
}
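/*!
* \internal
* \brief Timer callback to retry a failed remote connection attempt
*
* Retries the in-flight start (or migrate_from) command if enough of its
* original timeout remains; otherwise reports a timeout result.
*
* \param[in,out] data Executor state for the remote connection
*
* \return FALSE (one-shot timer)
*/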
static gboolean
retry_start_cmd_cb(gpointer data)
{
lrm_state_t *lrm_state = data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd = NULL;
int rc = ETIME;
if (!ra_data || !ra_data->cur_cmd) {
return FALSE;
}
cmd = ra_data->cur_cmd;
if (!pcmk__strcase_any_of(cmd->action, "start", "migrate_from", NULL)) {
return FALSE;
}
update_remaining_timeout(cmd);
if (cmd->remaining_timeout > 0) {
rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
} else {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_TIMEOUT,
"Not enough time remains to retry remote connection");
}
if (rc != pcmk_rc_ok) {
report_remote_ra_result(cmd);
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
ra_data->cur_cmd = NULL;
free_cmd(cmd);
} else {
/* wait for connection event */
}
return FALSE;
}
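/*!
* \internal
* \brief Timer callback for a stop that was waiting on a takeover event
*
* If the expected takeover never arrives, perform the stop now.
*
* \param[in,out] data Stop command that was waiting
*
* \return FALSE (one-shot timer)
*/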
static gboolean
connection_takeover_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
crm_info("takeover event timed out for node %s", cmd->rsc_id);
cmd->takeover_timeout_id = 0;
lrm_state = lrm_state_find(cmd->rsc_id);
handle_remote_ra_stop(lrm_state, cmd);
free_cmd(cmd);
return FALSE;
}
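/*!
* \internal
* \brief Timer callback for a poke (monitor) that got no response in time
*
* Reports a timeout result for the monitor and disconnects from the remote
* executor so the connection can be recovered.
*
* \param[in,out] data Monitor command that timed out
*
* \return FALSE (one-shot timer)
*/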
static gboolean
monitor_timeout_cb(gpointer data)
{
lrm_state_t *lrm_state = NULL;
remote_ra_cmd_t *cmd = data;
lrm_state = lrm_state_find(cmd->rsc_id);
crm_info("Timed out waiting for remote poke response from %s%s",
cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
cmd->monitor_timeout_id = 0;
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT,
"Remote executor did not respond");
if (lrm_state && lrm_state->remote_ra_data) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (ra_data->cur_cmd == cmd) {
ra_data->cur_cmd = NULL;
}
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
}
report_remote_ra_result(cmd);
free_cmd(cmd);
if(lrm_state) {
lrm_state_disconnect(lrm_state);
}
return FALSE;
}
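/*!
* \internal
* \brief Fabricate and process a successful executor result for an action
*
* \param[in,out] lrm_state Executor state to report against (NULL for local)
* \param[in] rsc_id Resource whose action "succeeded"
* \param[in] op_type Action name (e.g. "stop")
*/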
static void
synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
{
lrmd_event_data_t op = { 0, };
if (lrm_state == NULL) {
/* If lrm_state is not given, assume local */
lrm_state = lrm_state_find(controld_globals.our_nodename);
}
CRM_ASSERT(lrm_state != NULL);
op.type = lrmd_event_exec_complete;
op.rsc_id = rsc_id;
op.op_type = op_type;
op.t_run = (unsigned int) time(NULL);
op.t_rcchange = op.t_run;
op.call_id = generate_callid();
lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
process_lrm_event(lrm_state, &op, NULL, NULL);
}
void
remote_lrm_op_callback(lrmd_event_data_t * op)
{
gboolean cmd_handled = FALSE;
lrm_state_t *lrm_state = NULL;
remote_ra_data_t *ra_data = NULL;
remote_ra_cmd_t *cmd = NULL;
crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
"(%d) status=%s (%d)",
(op->op_type? op->op_type : ""), (op->op_type? " " : ""),
lrmd_event_type2str(op->type), op->remote_nodename,
services_ocf_exitcode_str(op->rc), op->rc,
pcmk_exec_status_str(op->op_status), op->op_status);
lrm_state = lrm_state_find(op->remote_nodename);
if (!lrm_state || !lrm_state->remote_ra_data) {
crm_debug("No state information found for remote connection event");
return;
}
ra_data = lrm_state->remote_ra_data;
if (op->type == lrmd_event_new_client) {
// Another client has connected to the remote daemon
if (pcmk_is_set(ra_data->status, expect_takeover)) {
// Great, we knew this was coming
lrm_remote_clear_flags(lrm_state, expect_takeover);
lrm_remote_set_flags(lrm_state, takeover_complete);
} else {
crm_err("Disconnecting from Pacemaker Remote node %s due to "
"unexpected client takeover", op->remote_nodename);
/* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */
/* Do not free lrm_state->conn yet. */
/* It'll be freed in the following stop action. */
lrm_state_disconnect_only(lrm_state);
}
return;
}
/* filter all EXEC events up */
if (op->type == lrmd_event_exec_complete) {
if (pcmk_is_set(ra_data->status, takeover_complete)) {
crm_debug("ignoring event, this connection is taken over by another node");
} else {
lrm_op_callback(op);
}
return;
}
if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
if (!pcmk_is_set(ra_data->status, remote_active)) {
crm_debug("Disconnection from Pacemaker Remote node %s complete",
lrm_state->node_name);
} else if (!remote_ra_is_in_maintenance(lrm_state)) {
crm_err("Lost connection to Pacemaker Remote node %s",
lrm_state->node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
} else {
crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
lrm_state->node_name);
/* Do roughly what a 'stop' on the remote-resource would do */
handle_remote_ra_stop(lrm_state, NULL);
remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
/* now fake the reply of a successful 'stop' */
synthesize_lrmd_success(NULL, lrm_state->node_name, "stop");
}
return;
}
if (!ra_data->cur_cmd) {
crm_debug("no event to match");
return;
}
cmd = ra_data->cur_cmd;
/* Start actions and migrate_from actions complete after the connection
* comes back to us. */
if (op->type == lrmd_event_connect && pcmk__strcase_any_of(cmd->action, "start",
"migrate_from", NULL)) {
if (op->connection_rc < 0) {
update_remaining_timeout(cmd);
if ((op->connection_rc == -ENOKEY)
|| (op->connection_rc == -EKEYREJECTED)) {
// Hard error, don't retry
pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM,
PCMK_EXEC_ERROR,
pcmk_strerror(op->connection_rc));
} else if (cmd->remaining_timeout > 3000) {
crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
return;
} else {
crm_trace("can't reschedule start, remaining timeout too small %d",
cmd->remaining_timeout);
pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_TIMEOUT,
"%s without enough time to retry",
pcmk_strerror(op->connection_rc));
}
} else {
lrm_state_reset_tables(lrm_state, TRUE);
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
lrm_remote_set_flags(lrm_state, remote_active);
}
crm_debug("Remote connection event matched %s action", cmd->action);
report_remote_ra_result(cmd);
cmd_handled = TRUE;
} else if (op->type == lrmd_event_poke && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
cmd->monitor_timeout_id = 0;
}
/* Only report success the first time; after that, only worry about
* failures. For this function, getting the poke back is always a success.
* Pokes only fail if the send fails or the response times out. */
if (!pcmk_is_set(cmd->status, cmd_reported_success)) {
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
cmd_set_flags(cmd, cmd_reported_success);
}
crm_debug("Remote poke event matched %s action", cmd->action);
/* success, keep rescheduling if interval is present. */
if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) {
ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
cmd->interval_id = g_timeout_add(cmd->interval_ms,
recurring_helper, cmd);
cmd = NULL; /* prevent free */
}
cmd_handled = TRUE;
} else if (op->type == lrmd_event_disconnect && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
if (pcmk_is_set(ra_data->status, remote_active) &&
!pcmk_is_set(cmd->status, cmd_cancel)) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR,
"Remote connection unexpectedly dropped "
"during monitor");
report_remote_ra_result(cmd);
crm_err("Remote connection to %s unexpectedly dropped during monitor",
lrm_state->node_name);
}
cmd_handled = TRUE;
} else if (op->type == lrmd_event_new_client && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
handle_remote_ra_stop(lrm_state, cmd);
cmd_handled = TRUE;
} else {
crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
}
if (cmd_handled) {
ra_data->cur_cmd = NULL;
if (ra_data->cmds) {
mainloop_set_trigger(ra_data->work);
}
free_cmd(cmd);
}
}
static void
handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
{
remote_ra_data_t *ra_data = NULL;
CRM_ASSERT(lrm_state);
ra_data = lrm_state->remote_ra_data;
if (!pcmk_is_set(ra_data->status, takeover_complete)) {
/* Delete pending ops whenever the remote connection is intentionally stopped */
g_hash_table_remove_all(lrm_state->active_ops);
} else {
/* We no longer hold the history if this connection has been migrated;
* however, we keep the metadata cache for future use */
lrm_state_reset_tables(lrm_state, FALSE);
}
lrm_remote_clear_flags(lrm_state, remote_active);
lrm_state_disconnect(lrm_state);
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
ra_data->cmds = NULL;
ra_data->recurring_cmds = NULL;
ra_data->cur_cmd = NULL;
if (cmd) {
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
}
}
/*!
* \internal
* \brief Initiate a connection to a Pacemaker Remote node
*
* \param[in,out] lrm_state Executor state for the remote connection
* \param[in,out] cmd Start or migrate_from command being executed
* \param[in] timeout_ms Command timeout, capped at MAX_START_TIMEOUT_MS
*
* \return Standard Pacemaker return code
*/
static int
handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
{
const char *server = NULL;
lrmd_key_value_t *tmp = NULL;
int port = 0;
int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
int rc = pcmk_rc_ok;
for (tmp = cmd->params; tmp; tmp = tmp->next) {
if (pcmk__strcase_any_of(tmp->key, XML_RSC_ATTR_REMOTE_RA_ADDR,
XML_RSC_ATTR_REMOTE_RA_SERVER, NULL)) {
server = tmp->value;
} else if (pcmk__str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_PORT, pcmk__str_casei)) {
port = atoi(tmp->value);
} else if (pcmk__str_eq(tmp->key, CRM_META "_" XML_RSC_ATTR_CONTAINER, pcmk__str_casei)) {
lrm_remote_set_flags(lrm_state, controlling_guest);
}
}
rc = controld_connect_remote_executor(lrm_state, server, port,
timeout_used);
if (rc != pcmk_rc_ok) {
pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR,
"Could not connect to Pacemaker Remote node %s: %s",
lrm_state->node_name, pcmk_rc_str(rc));
}
return rc;
}
static gboolean
handle_remote_ra_exec(gpointer user_data)
{
int rc = 0;
lrm_state_t *lrm_state = user_data;
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
remote_ra_cmd_t *cmd;
GList *first = NULL;
if (ra_data->cur_cmd) {
/* still waiting on previous cmd */
return TRUE;
}
while (ra_data->cmds) {
first = ra_data->cmds;
cmd = first->data;
if (cmd->delay_id) {
/* still waiting for start delay timer to trip */
return TRUE;
}
ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
g_list_free_1(first);
if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) {
lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete);
if (handle_remote_ra_start(lrm_state, cmd,
cmd->timeout) == pcmk_rc_ok) {
/* take care of this later when we get async connection result */
crm_debug("Initiated async remote connection, %s action will complete after connect event",
cmd->action);
ra_data->cur_cmd = cmd;
return TRUE;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, "monitor")) {
if (lrm_state_is_connected(lrm_state) == TRUE) {
rc = lrm_state_poke_connection(lrm_state);
if (rc < 0) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR, pcmk_strerror(rc));
}
} else {
rc = -1;
pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING,
PCMK_EXEC_DONE, "Remote connection inactive");
}
if (rc == 0) {
crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
cmd->rsc_id);
ra_data->cur_cmd = cmd;
cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
return TRUE;
}
report_remote_ra_result(cmd);
} else if (!strcmp(cmd->action, "stop")) {
if (pcmk_is_set(ra_data->status, expect_takeover)) {
/* Briefly wait on stop for the takeover event to occur. If the takeover
* event does not occur during the wait period, that's fine; it just
* means that the remote node's lrm_status section is going to get
* cleared, which will require all the resources running on the remote
* node to be explicitly re-detected via probe actions. If the takeover
* does occur successfully, then we can leave the status section intact. */
cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
ra_data->cur_cmd = cmd;
return TRUE;
}
handle_remote_ra_stop(lrm_state, cmd);
} else if (!strcmp(cmd->action, "migrate_to")) {
lrm_remote_clear_flags(lrm_state, takeover_complete);
lrm_remote_set_flags(lrm_state, expect_takeover);
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
} else if (pcmk__str_any_of(cmd->action, CRMD_ACTION_RELOAD,
CRMD_ACTION_RELOAD_AGENT, NULL)) {
/* Currently the only reloadable parameter is reconnect_interval,
* which is only used by the scheduler via the CIB, so reloads are a
* no-op.
*
* @COMPAT DC <2.1.0: We only need to check for "reload" in case
* we're in a rolling upgrade with a DC scheduling "reload" instead
* of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway,
* so this would work for that purpose as well.
*/
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
}
free_cmd(cmd);
}
return TRUE;
}
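/*!
* \internal
* \brief Initialize remote RA data for an executor state, if not done already
*
* Allocates the remote_ra_data_t and registers the mainloop trigger that
* drains the command queue via handle_remote_ra_exec().
*
* \param[in,out] lrm_state Executor state to initialize
*/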
static void
remote_ra_data_init(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = NULL;
if (lrm_state->remote_ra_data) {
return;
}
ra_data = calloc(1, sizeof(remote_ra_data_t));
ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
lrm_state->remote_ra_data = ra_data;
}
void
remote_ra_cleanup(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
if (!ra_data) {
return;
}
if (ra_data->cmds) {
g_list_free_full(ra_data->cmds, free_cmd);
}
if (ra_data->recurring_cmds) {
g_list_free_full(ra_data->recurring_cmds, free_cmd);
}
mainloop_destroy_trigger(ra_data->work);
free(ra_data);
lrm_state->remote_ra_data = NULL;
}
gboolean
is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
{
if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
return TRUE;
}
if ((id != NULL) && (lrm_state_find(id) != NULL)
&& !pcmk__str_eq(id, controld_globals.our_nodename, pcmk__str_casei)) {
return TRUE;
}
return FALSE;
}
lrmd_rsc_info_t *
remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
{
lrmd_rsc_info_t *info = NULL;
if ((lrm_state_find(rsc_id))) {
info = calloc(1, sizeof(lrmd_rsc_info_t));
info->id = strdup(rsc_id);
info->type = strdup(REMOTE_LRMD_RA);
info->standard = strdup(PCMK_RESOURCE_CLASS_OCF);
info->provider = strdup("pacemaker");
}
return info;
}
static gboolean
is_remote_ra_supported_action(const char *action)
{
return pcmk__str_any_of(action,
CRMD_ACTION_START,
CRMD_ACTION_STOP,
CRMD_ACTION_STATUS,
CRMD_ACTION_MIGRATE,
CRMD_ACTION_MIGRATED,
CRMD_ACTION_RELOAD_AGENT,
CRMD_ACTION_RELOAD,
NULL);
}
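/*!
* \internal
* \brief Fail and remove all recurring monitor commands in a list
*
* Each matching command gets an error result reported before removal.
*
* \param[in,out] list List of remote RA commands to prune
*
* \return Updated list
*/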
static GList *
fail_all_monitor_cmds(GList * list)
{
GList *rm_list = NULL;
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms > 0) && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
rm_list = g_list_append(rm_list, cmd);
}
}
for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR, "Lost connection to remote executor");
crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
report_remote_ra_result(cmd);
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
/* frees only the list data, not the cmds */
g_list_free(rm_list);
return list;
}
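/*!
* \internal
* \brief Remove (and free) the first command matching an action and interval
*
* \param[in,out] list List of remote RA commands to search
* \param[in] action Action name to match
* \param[in] interval_ms Interval to match
*
* \return Updated list
*/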
static GList *
remove_cmd(GList * list, const char *action, guint interval_ms)
{
remote_ra_cmd_t *cmd = NULL;
GList *gIter = NULL;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, action, pcmk__str_casei)) {
break;
}
cmd = NULL;
}
if (cmd) {
list = g_list_remove(list, cmd);
free_cmd(cmd);
}
return list;
}
int
remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
const char *action, guint interval_ms)
{
lrm_state_t *connection_rsc = NULL;
remote_ra_data_t *ra_data = NULL;
connection_rsc = lrm_state_find(rsc_id);
if (!connection_rsc || !connection_rsc->remote_ra_data) {
return -EINVAL;
}
ra_data = connection_rsc->remote_ra_data;
ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
interval_ms);
if (ra_data->cur_cmd &&
(ra_data->cur_cmd->interval_ms == interval_ms) &&
(pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) {
cmd_set_flags(ra_data->cur_cmd, cmd_cancel);
}
return 0;
}
static remote_ra_cmd_t *
handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
const char *userdata)
{
GList *gIter = NULL;
remote_ra_cmd_t *cmd = NULL;
/* there are 3 places a potential duplicate monitor operation
* could exist.
* 1. recurring_cmds list. where the op is waiting for its next interval
* 2. cmds list, where the op is queued to get executed immediately
* 3. cur_cmd, which means the monitor op is in flight right now.
*/
if (interval_ms == 0) {
return NULL;
}
if (ra_data->cur_cmd &&
!pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) &&
(ra_data->cur_cmd->interval_ms == interval_ms) &&
pcmk__str_eq(ra_data->cur_cmd->action, "monitor", pcmk__str_casei)) {
cmd = ra_data->cur_cmd;
goto handle_dup;
}
for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
goto handle_dup;
}
}
for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
goto handle_dup;
}
}
return NULL;
handle_dup:
crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
cmd->rsc_id, "monitor", interval_ms);
/* update the userdata */
if (userdata) {
free(cmd->userdata);
cmd->userdata = strdup(userdata);
}
/* if we've already reported success, generate a new call id */
if (pcmk_is_set(cmd->status, cmd_reported_success)) {
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
cmd_clear_flags(cmd, cmd_reported_success);
}
/* If we have an interval_id set, we are in the process of waiting for
* this cmd's next interval. Instead of waiting, cancel the timer and
* execute the action immediately. */
if (cmd->interval_id) {
g_source_remove(cmd->interval_id);
cmd->interval_id = 0;
recurring_helper(cmd);
}
return cmd;
}
/*!
* \internal
* \brief Execute an action using the (internal) ocf:pacemaker:remote agent
*
* \param[in] lrm_state Executor state object for remote connection
* \param[in] rsc_id Connection resource ID
* \param[in] action Action to execute
* \param[in] userdata String to copy and pass to execution callback
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in] timeout_ms Action timeout (in milliseconds)
* \param[in] start_delay_ms Delay (in milliseconds) before executing action
* \param[in,out] params Connection resource parameters
* \param[out] call_id Where to store call ID on success
*
* \return Standard Pacemaker return code
* \note This takes ownership of \p params, which should not be used or freed
* after calling this function.
*/
int
controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id,
const char *action, const char *userdata,
guint interval_ms, int timeout_ms,
int start_delay_ms, lrmd_key_value_t *params,
int *call_id)
{
lrm_state_t *connection_rsc = NULL;
remote_ra_cmd_t *cmd = NULL;
remote_ra_data_t *ra_data = NULL;
*call_id = 0;
CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL)
&& (userdata != NULL) && (call_id != NULL),
lrmd_key_value_freeall(params); return EINVAL);
if (!is_remote_ra_supported_action(action)) {
lrmd_key_value_freeall(params);
return EOPNOTSUPP;
}
connection_rsc = lrm_state_find(rsc_id);
if (connection_rsc == NULL) {
lrmd_key_value_freeall(params);
return ENOTCONN;
}
remote_ra_data_init(connection_rsc);
ra_data = connection_rsc->remote_ra_data;
cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
if (cmd) {
*call_id = cmd->call_id;
lrmd_key_value_freeall(params);
return pcmk_rc_ok;
}
cmd = calloc(1, sizeof(remote_ra_cmd_t));
if (cmd == NULL) {
lrmd_key_value_freeall(params);
return ENOMEM;
}
cmd->owner = strdup(lrm_state->node_name);
cmd->rsc_id = strdup(rsc_id);
cmd->action = strdup(action);
cmd->userdata = strdup(userdata);
if ((cmd->owner == NULL) || (cmd->rsc_id == NULL) || (cmd->action == NULL)
|| (cmd->userdata == NULL)) {
free_cmd(cmd);
lrmd_key_value_freeall(params);
return ENOMEM;
}
cmd->interval_ms = interval_ms;
cmd->timeout = timeout_ms;
cmd->start_delay = start_delay_ms;
cmd->params = params;
cmd->start_time = time(NULL);
cmd->call_id = generate_callid();
if (cmd->start_delay) {
cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
}
ra_data->cmds = g_list_append(ra_data->cmds, cmd);
mainloop_set_trigger(ra_data->work);
*call_id = cmd->call_id;
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Immediately fail all monitors of a remote node, if proxied here
*
* \param[in] node_name Name of pacemaker_remote node
*/
void
remote_ra_fail(const char *node_name)
{
lrm_state_t *lrm_state = lrm_state_find(node_name);
if (lrm_state && lrm_state_is_connected(lrm_state)) {
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
crm_info("Failing monitors on Pacemaker Remote node %s", node_name);
ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
}
}
/* A guest node fencing implied by host fencing looks like:
*
* <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
* on_node="lxc1" on_node_uuid="lxc1">
* <attributes CRM_meta_on_node="lxc1" CRM_meta_on_node_uuid="lxc1"
* CRM_meta_stonith_action="off" crm_feature_set="3.0.12"/>
* <downed>
* <node id="lxc1"/>
* </downed>
* </pseudo_event>
*/
#define XPATH_PSEUDO_FENCE "/" XML_GRAPH_TAG_PSEUDO_EVENT \
"[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \
"/" XML_CIB_TAG_NODE
/*!
* \internal
* \brief Check a pseudo-action for Pacemaker Remote node side effects
*
* \param[in,out] xml XML of pseudo-action to check
*/
void
remote_ra_process_pseudo(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
if (numXpathResults(search) == 1) {
xmlNode *result = getXpathResult(search, 0);
/* Normally, we handle the necessary side effects of a guest node stop
* action when reporting the remote agent's result. However, if the stop
* is implied due to fencing, it will be a fencing pseudo-event, and
* there won't be a result to report. Handle that case here.
*
* This will result in a duplicate call to remote_node_down() if the
* guest stop was real instead of implied, but that shouldn't hurt.
*
* There is still one corner case that isn't handled: if a guest node
* isn't running any resources when its host is fenced, it will appear
* to be cleanly stopped, so there will be no pseudo-fence, and our
* peer cache state will be incorrect unless and until the guest is
* recovered.
*/
if (result) {
const char *remote = ID(result);
if (remote) {
remote_node_down(remote, DOWN_ERASE_LRM);
}
}
}
freeXpathObject(search);
}
static void
remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
{
xmlNode *update, *state;
int call_opt;
crm_node_t *node;
call_opt = crmd_cib_smart_opt();
node = crm_remote_peer_get(lrm_state->node_name);
CRM_CHECK(node != NULL, return);
update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
state = create_node_state_update(node, node_update_none, update,
__func__);
crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0");
if (controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt,
NULL) == pcmk_rc_ok) {
/* TODO: still not 100% sure that async update will succeed ... */
if (maintenance) {
lrm_remote_set_flags(lrm_state, remote_in_maint);
} else {
lrm_remote_clear_flags(lrm_state, remote_in_maint);
}
}
free_xml(update);
}
#define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
"[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \
XML_GRAPH_TAG_MAINTENANCE
/*!
* \internal
* \brief Check a pseudo-action holding updates for maintenance state
*
* \param[in,out] xml XML of pseudo-action to check
*/
void
remote_ra_process_maintenance_nodes(xmlNode *xml)
{
xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
if (numXpathResults(search) == 1) {
xmlNode *node;
int cnt = 0, cnt_remote = 0;
for (node =
first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE);
node != NULL; node = pcmk__xml_next(node)) {
lrm_state_t *lrm_state = lrm_state_find(ID(node));
cnt++;
if (lrm_state && lrm_state->remote_ra_data &&
pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) {
int is_maint;
cnt_remote++;
pcmk__scan_min_int(crm_element_value(node, XML_NODE_IS_MAINTENANCE),
&is_maint, 0);
remote_ra_maintenance(lrm_state, is_maint);
}
}
crm_trace("Action holds %d nodes (%d remotes found) "
"adjusting maintenance-mode", cnt, cnt_remote);
}
freeXpathObject(search);
}
gboolean
remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return pcmk_is_set(ra_data->status, remote_in_maint);
}
gboolean
remote_ra_controlling_guest(lrm_state_t * lrm_state)
{
remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
return pcmk_is_set(ra_data->status, controlling_guest);
}
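
The remote_ra.c change above teaches remote_node_up() to apply a configured start state as soon as a Pacemaker Remote node is integrated. The value comes from lrmd__node_start_state() on the connection's lrmd handle; presumably the remote daemon forwards its own PCMK_node_start_state setting during the connection handshake (the execd side of this revision appears below). A condensed sketch of the new flow, under that assumption:

    /* On the remote host, pacemaker-remoted is started with e.g.
     * PCMK_node_start_state=standby in its environment.
     */

    /* On the cluster node hosting the connection, in remote_node_up(): */
    lrm_state_t *connection_rsc = lrm_state_find(node->uname);

    if (connection_rsc != NULL) {
        const char *start_state = lrmd__node_start_state(connection_rsc->conn);

        if (start_state != NULL) {
            /* "standby", "online", or "default" */
            set_join_state(start_state, node->uname, node->uuid, true);
        }
    }

Unlike the local path in do_cl_join_finalize_respond(), this is not gated by a first_join flag, so it runs on each successful remote connection start.
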
diff --git a/daemons/controld/pacemaker-controld.h b/daemons/controld/pacemaker-controld.h
index 1484a00c60..2334cce321 100644
--- a/daemons/controld/pacemaker-controld.h
+++ b/daemons/controld/pacemaker-controld.h
@@ -1,39 +1,42 @@
/*
* Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRMD__H
# define CRMD__H
#include <controld_alerts.h>
#include <controld_callbacks.h>
#include <controld_cib.h>
#include <controld_fencing.h>
#include <controld_fsa.h>
#include <controld_globals.h>
#include <controld_timers.h>
#include <controld_lrm.h>
#include <controld_membership.h>
#include <controld_messages.h>
#include <controld_metadata.h>
#include <controld_throttle.h>
#include <controld_transition.h>
#include <controld_utils.h>
# define controld_trigger_config() \
controld_trigger_config_as(__func__, __LINE__)
void crmd_metadata(void);
void controld_trigger_config_as(const char *fn, int line);
void controld_election_init(const char *uname);
void controld_configure_election(GHashTable *options);
void controld_remove_voter(const char *uname);
void controld_election_fini(void);
void controld_stop_current_election_timeout(void);
+void set_join_state(const char *start_state, const char *node_name,
+ const char *node_uuid, bool remote);
+
#endif
diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c
index fa2761eb6b..9a783a58b5 100644
--- a/daemons/execd/execd_commands.c
+++ b/daemons/execd/execd_commands.c
@@ -1,1927 +1,1932 @@
/*
* Copyright 2012-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <crm/fencing/internal.h>
#include <glib.h>
// Check whether we have a high-resolution monotonic clock
#undef PCMK__TIME_USE_CGT
#if HAVE_DECL_CLOCK_MONOTONIC && defined(CLOCK_MONOTONIC)
# define PCMK__TIME_USE_CGT
# include <time.h> /* clock_gettime */
#endif
#include <unistd.h>
#include <crm/crm.h>
#include <crm/fencing/internal.h>
#include <crm/services.h>
#include <crm/services_internal.h>
#include <crm/common/mainloop.h>
#include <crm/common/ipc.h>
#include <crm/common/ipc_internal.h>
#include <crm/msg_xml.h>
#include "pacemaker-execd.h"
GHashTable *rsc_list = NULL;
typedef struct lrmd_cmd_s {
int timeout;
guint interval_ms;
int start_delay;
int timeout_orig;
int call_id;
int call_opts;
/* Timer ids, must be removed on cmd destruction. */
int delay_id;
int stonith_recurring_id;
int rsc_deleted;
int service_flags;
char *client_id;
char *origin;
char *rsc_id;
char *action;
char *real_action;
char *userdata_str;
pcmk__action_result_t result;
/* We can track operation queue time and run time, to be saved with the CIB
* resource history (and displayed in cluster status). We need
* high-resolution monotonic time for this purpose, so we use
* clock_gettime(CLOCK_MONOTONIC, ...) (if available, otherwise this feature
* is disabled).
*
* However, we also need epoch timestamps for recording the time the command
* last ran and the time its return value last changed, for use in time
* displays (as opposed to interval calculations). We keep time_t values for
* this purpose.
*
* The last run time is used for both purposes, so we keep redundant
* monotonic and epoch values for this. Technically the two could represent
* different times, but since time_t has only second resolution and the
* values are used for distinct purposes, that is not significant.
*/
#ifdef PCMK__TIME_USE_CGT
/* Recurring and systemd operations may involve more than one executor
* command per operation, so they need info about the original and the most
* recent.
*/
struct timespec t_first_run; // When op first ran
struct timespec t_run; // When op most recently ran
struct timespec t_first_queue; // When op was first queued
struct timespec t_queue; // When op was most recently queued
#endif
time_t epoch_last_run; // Epoch timestamp of when op last ran
time_t epoch_rcchange; // Epoch timestamp of when rc last changed
bool first_notify_sent;
int last_notify_rc;
int last_notify_op_status;
int last_pid;
GHashTable *params;
} lrmd_cmd_t;
static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc);
static gboolean execute_resource_action(gpointer user_data);
static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id);
#ifdef PCMK__TIME_USE_CGT
/*!
* \internal
* \brief Check whether a struct timespec has been set
*
* \param[in] timespec Time to check
*
* \return true if timespec has been set (i.e. is nonzero), false otherwise
*/
static inline bool
time_is_set(const struct timespec *timespec)
{
return (timespec != NULL) &&
((timespec->tv_sec != 0) || (timespec->tv_nsec != 0));
}
/*!
 * \internal
 * \brief Set a timespec (and its original, if unset) to the current time
 *
 * \param[out] t_current Where to store the current time
 * \param[out] t_orig Where to copy t_current if t_orig is unset
 */
static void
get_current_time(struct timespec *t_current, struct timespec *t_orig)
{
clock_gettime(CLOCK_MONOTONIC, t_current);
if ((t_orig != NULL) && !time_is_set(t_orig)) {
*t_orig = *t_current;
}
}
/*!
* \internal
* \brief Return difference between two times in milliseconds
*
* \param[in] now More recent time (or NULL to use current time)
* \param[in] old Earlier time
*
* \return milliseconds difference (or 0 if old is NULL or unset)
*
 * \note Can overflow on 32-bit machines when the difference is around
 *       24 days or more.
*/
static int
time_diff_ms(const struct timespec *now, const struct timespec *old)
{
int diff_ms = 0;
if (time_is_set(old)) {
struct timespec local_now = { 0, };
if (now == NULL) {
clock_gettime(CLOCK_MONOTONIC, &local_now);
now = &local_now;
}
diff_ms = (now->tv_sec - old->tv_sec) * 1000
+ (now->tv_nsec - old->tv_nsec) / 1000000;
}
return diff_ms;
}
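/* A hypothetical wider variant of the calculation above, sketched only to
 * show how the 32-bit overflow noted for time_diff_ms() could be avoided;
 * the daemon itself does not use this.
 */
static long long
time_diff_ms_wide(const struct timespec *now, const struct timespec *old)
{
    return ((long long) now->tv_sec - old->tv_sec) * 1000LL
           + ((long long) now->tv_nsec - old->tv_nsec) / 1000000LL;
}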
/*!
* \internal
* \brief Reset a command's operation times to their original values.
*
* Reset a command's run and queued timestamps to the timestamps of the original
* command, so we report the entire time since then and not just the time since
* the most recent command (for recurring and systemd operations).
*
* \param[in,out] cmd Executor command object to reset
*
* \note It's not obvious what the queued time should be for a systemd
* start/stop operation, which might go like this:
* initial command queued 5ms, runs 3s
* monitor command queued 10ms, runs 10s
* monitor command queued 10ms, runs 10s
* Is the queued time for that operation 5ms, 10ms or 25ms? The current
* implementation will report 5ms. If it's 25ms, then we need to
* subtract 20ms from the total exec time so as not to count it twice.
* We can implement that later if it matters to anyone ...
*/
static void
cmd_original_times(lrmd_cmd_t * cmd)
{
cmd->t_run = cmd->t_first_run;
cmd->t_queue = cmd->t_first_queue;
}
#endif
static inline bool
action_matches(const lrmd_cmd_t *cmd, const char *action, guint interval_ms)
{
return (cmd->interval_ms == interval_ms)
&& pcmk__str_eq(cmd->action, action, pcmk__str_casei);
}
/*!
* \internal
* \brief Log the result of an asynchronous command
*
* \param[in] cmd Command to log result for
* \param[in] exec_time_ms Execution time in milliseconds, if known
* \param[in] queue_time_ms Queue time in milliseconds, if known
*/
static void
log_finished(const lrmd_cmd_t *cmd, int exec_time_ms, int queue_time_ms)
{
int log_level = LOG_INFO;
GString *str = g_string_sized_new(100); // reasonable starting size
if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
log_level = LOG_DEBUG;
}
g_string_append_printf(str, "%s %s (call %d",
cmd->rsc_id, cmd->action, cmd->call_id);
if (cmd->last_pid != 0) {
g_string_append_printf(str, ", PID %d", cmd->last_pid);
}
if (cmd->result.execution_status == PCMK_EXEC_DONE) {
g_string_append_printf(str, ") exited with status %d",
cmd->result.exit_status);
} else {
pcmk__g_strcat(str, ") could not be executed: ",
pcmk_exec_status_str(cmd->result.execution_status),
NULL);
}
if (cmd->result.exit_reason != NULL) {
pcmk__g_strcat(str, " (", cmd->result.exit_reason, ")", NULL);
}
#ifdef PCMK__TIME_USE_CGT
pcmk__g_strcat(str, " (execution time ",
pcmk__readable_interval(exec_time_ms), NULL);
if (queue_time_ms > 0) {
pcmk__g_strcat(str, " after being queued ",
pcmk__readable_interval(queue_time_ms), NULL);
}
g_string_append_c(str, ')');
#endif
do_crm_log(log_level, "%s", str->str);
g_string_free(str, TRUE);
}
static void
log_execute(lrmd_cmd_t * cmd)
{
int log_level = LOG_INFO;
if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
log_level = LOG_DEBUG;
}
do_crm_log(log_level, "executing - rsc:%s action:%s call_id:%d",
cmd->rsc_id, cmd->action, cmd->call_id);
}
static const char *
normalize_action_name(lrmd_rsc_t * rsc, const char *action)
{
if (pcmk__str_eq(action, "monitor", pcmk__str_casei) &&
pcmk_is_set(pcmk_get_ra_caps(rsc->class), pcmk_ra_cap_status)) {
return "status";
}
return action;
}
static lrmd_rsc_t *
build_rsc_from_xml(xmlNode * msg)
{
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR);
lrmd_rsc_t *rsc = NULL;
rsc = calloc(1, sizeof(lrmd_rsc_t));
crm_element_value_int(msg, F_LRMD_CALLOPTS, &rsc->call_opts);
rsc->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID);
rsc->class = crm_element_value_copy(rsc_xml, F_LRMD_CLASS);
rsc->provider = crm_element_value_copy(rsc_xml, F_LRMD_PROVIDER);
rsc->type = crm_element_value_copy(rsc_xml, F_LRMD_TYPE);
rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, execute_resource_action,
rsc);
// Initialize fence device probes (to return "not running")
pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR,
PCMK_EXEC_NO_FENCE_DEVICE, NULL);
return rsc;
}
static lrmd_cmd_t *
create_lrmd_cmd(xmlNode *msg, pcmk__client_t *client)
{
int call_options = 0;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, msg, LOG_ERR);
lrmd_cmd_t *cmd = NULL;
cmd = calloc(1, sizeof(lrmd_cmd_t));
crm_element_value_int(msg, F_LRMD_CALLOPTS, &call_options);
cmd->call_opts = call_options;
cmd->client_id = strdup(client->id);
crm_element_value_int(msg, F_LRMD_CALLID, &cmd->call_id);
crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &cmd->interval_ms);
crm_element_value_int(rsc_xml, F_LRMD_TIMEOUT, &cmd->timeout);
crm_element_value_int(rsc_xml, F_LRMD_RSC_START_DELAY, &cmd->start_delay);
cmd->timeout_orig = cmd->timeout;
cmd->origin = crm_element_value_copy(rsc_xml, F_LRMD_ORIGIN);
cmd->action = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ACTION);
cmd->userdata_str = crm_element_value_copy(rsc_xml, F_LRMD_RSC_USERDATA_STR);
cmd->rsc_id = crm_element_value_copy(rsc_xml, F_LRMD_RSC_ID);
cmd->params = xml2list(rsc_xml);
if (pcmk__str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), "block", pcmk__str_casei)) {
crm_debug("Setting flag to leave pid group on timeout and "
"only kill action pid for " PCMK__OP_FMT,
cmd->rsc_id, cmd->action, cmd->interval_ms);
cmd->service_flags = pcmk__set_flags_as(__func__, __LINE__,
LOG_TRACE, "Action",
cmd->action, 0,
SVC_ACTION_LEAVE_GROUP,
"SVC_ACTION_LEAVE_GROUP");
}
return cmd;
}
static void
stop_recurring_timer(lrmd_cmd_t *cmd)
{
if (cmd) {
if (cmd->stonith_recurring_id) {
g_source_remove(cmd->stonith_recurring_id);
}
cmd->stonith_recurring_id = 0;
}
}
static void
free_lrmd_cmd(lrmd_cmd_t * cmd)
{
stop_recurring_timer(cmd);
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
if (cmd->params) {
g_hash_table_destroy(cmd->params);
}
pcmk__reset_result(&(cmd->result));
free(cmd->origin);
free(cmd->action);
free(cmd->real_action);
free(cmd->userdata_str);
free(cmd->rsc_id);
free(cmd->client_id);
free(cmd);
}
static gboolean
stonith_recurring_op_helper(gpointer data)
{
lrmd_cmd_t *cmd = data;
lrmd_rsc_t *rsc;
cmd->stonith_recurring_id = 0;
if (!cmd->rsc_id) {
return FALSE;
}
rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id);
CRM_ASSERT(rsc != NULL);
/* Take the command off the recurring_ops list, and put it on the
 * pending_ops list to be executed */
rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd);
rsc->pending_ops = g_list_append(rsc->pending_ops, cmd);
#ifdef PCMK__TIME_USE_CGT
get_current_time(&(cmd->t_queue), &(cmd->t_first_queue));
#endif
mainloop_set_trigger(rsc->work);
return FALSE;
}
static inline void
start_recurring_timer(lrmd_cmd_t *cmd)
{
if (cmd && (cmd->interval_ms > 0)) {
cmd->stonith_recurring_id = g_timeout_add(cmd->interval_ms,
stonith_recurring_op_helper,
cmd);
}
}
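/* The recurring timer above is deliberately one-shot: the helper returns
 * FALSE so GLib removes the source, and the timer is re-armed only after the
 * previous run completes (see stonith_action_complete()). A minimal sketch of
 * that GLib pattern, with hypothetical names:
 */
static gboolean
one_shot_helper(gpointer data)
{
    (void) data; // unused in this sketch
    /* do the work here, then re-arm explicitly if another run is wanted */
    return FALSE; // don't let GLib repeat this source automatically
}

static guint
arm_one_shot(guint interval_ms, gpointer data)
{
    return g_timeout_add(interval_ms, one_shot_helper, data);
}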
static gboolean
start_delay_helper(gpointer data)
{
lrmd_cmd_t *cmd = data;
lrmd_rsc_t *rsc = NULL;
cmd->delay_id = 0;
rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL;
if (rsc) {
mainloop_set_trigger(rsc->work);
}
return FALSE;
}
/*!
* \internal
* \brief Check whether a list already contains the equivalent of a given action
*
* \param[in] action_list List to search
* \param[in] cmd Action to search for
*/
static lrmd_cmd_t *
find_duplicate_action(const GList *action_list, const lrmd_cmd_t *cmd)
{
for (const GList *item = action_list; item != NULL; item = item->next) {
lrmd_cmd_t *dup = item->data;
if (action_matches(cmd, dup->action, dup->interval_ms)) {
return dup;
}
}
return NULL;
}
static bool
merge_recurring_duplicate(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
{
lrmd_cmd_t * dup = NULL;
bool dup_pending = true;
if (cmd->interval_ms == 0) {
return false;
}
// Search for a duplicate of this action (in-flight or not)
dup = find_duplicate_action(rsc->pending_ops, cmd);
if (dup == NULL) {
dup_pending = false;
dup = find_duplicate_action(rsc->recurring_ops, cmd);
if (dup == NULL) {
return false;
}
}
/* Do not merge fencing monitors marked for cancellation, so we can reply to
* the cancellation separately.
*/
if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH,
pcmk__str_casei)
&& (dup->result.execution_status == PCMK_EXEC_CANCELLED)) {
return false;
}
/* This should not occur. If it does, we need to investigate how something
* like this is possible in the controller.
*/
crm_warn("Duplicate recurring op entry detected (" PCMK__OP_FMT
"), merging with previous op entry",
rsc->rsc_id, normalize_action_name(rsc, dup->action),
dup->interval_ms);
// Merge new action's call ID and user data into existing action
dup->first_notify_sent = false;
free(dup->userdata_str);
dup->userdata_str = cmd->userdata_str;
cmd->userdata_str = NULL;
dup->call_id = cmd->call_id;
free_lrmd_cmd(cmd);
cmd = NULL;
/* If dup is not pending, that means it has already executed at least once
* and is waiting in the interval. In that case, stop waiting and initiate
* a new instance now.
*/
if (!dup_pending) {
if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH,
pcmk__str_casei)) {
stop_recurring_timer(dup);
stonith_recurring_op_helper(dup);
} else {
services_action_kick(rsc->rsc_id,
normalize_action_name(rsc, dup->action),
dup->interval_ms);
}
}
return true;
}
static void
schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
{
CRM_CHECK(cmd != NULL, return);
CRM_CHECK(rsc != NULL, return);
crm_trace("Scheduling %s on %s", cmd->action, rsc->rsc_id);
if (merge_recurring_duplicate(rsc, cmd)) {
// Equivalent of cmd has already been scheduled
return;
}
/* The controller expects the executor to automatically cancel
* recurring operations before a resource stops.
*/
if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
cancel_all_recurring(rsc, NULL);
}
rsc->pending_ops = g_list_append(rsc->pending_ops, cmd);
#ifdef PCMK__TIME_USE_CGT
get_current_time(&(cmd->t_queue), &(cmd->t_first_queue));
#endif
mainloop_set_trigger(rsc->work);
if (cmd->start_delay) {
cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
}
}
static xmlNode *
create_lrmd_reply(const char *origin, int rc, int call_id)
{
xmlNode *reply = create_xml_node(NULL, T_LRMD_REPLY);
crm_xml_add(reply, F_LRMD_ORIGIN, origin);
crm_xml_add_int(reply, F_LRMD_RC, rc);
crm_xml_add_int(reply, F_LRMD_CALLID, call_id);
return reply;
}
static void
send_client_notify(gpointer key, gpointer value, gpointer user_data)
{
xmlNode *update_msg = user_data;
pcmk__client_t *client = value;
int rc;
int log_level = LOG_WARNING;
const char *msg = NULL;
CRM_CHECK(client != NULL, return);
if (client->name == NULL) {
crm_trace("Skipping notification to client without name");
return;
}
if (pcmk_is_set(client->flags, pcmk__client_to_proxy)) {
/* We only want to notify clients of the executor IPC API. If we are
* running as Pacemaker Remote, we may have clients proxied to other
* IPC services in the cluster, so skip those.
*/
crm_trace("Skipping executor API notification to client %s",
pcmk__client_name(client));
return;
}
rc = lrmd_server_send_notify(client, update_msg);
if (rc == pcmk_rc_ok) {
return;
}
switch (rc) {
case ENOTCONN:
case EPIPE: // Client exited without waiting for notification
log_level = LOG_INFO;
msg = "Disconnected";
break;
default:
msg = pcmk_rc_str(rc);
break;
}
do_crm_log(log_level, "Could not notify client %s: %s " CRM_XS " rc=%d",
pcmk__client_name(client), msg, rc);
}
static void
send_cmd_complete_notify(lrmd_cmd_t * cmd)
{
xmlNode *notify = NULL;
int exec_time = 0;
int queue_time = 0;
#ifdef PCMK__TIME_USE_CGT
exec_time = time_diff_ms(NULL, &(cmd->t_run));
queue_time = time_diff_ms(&cmd->t_run, &(cmd->t_queue));
#endif
log_finished(cmd, exec_time, queue_time);
/* If the originator requested to be notified only for changes in recurring
* operation results, skip the notification if the result hasn't changed.
*/
if (cmd->first_notify_sent
&& pcmk_is_set(cmd->call_opts, lrmd_opt_notify_changes_only)
&& (cmd->last_notify_rc == cmd->result.exit_status)
&& (cmd->last_notify_op_status == cmd->result.execution_status)) {
return;
}
cmd->first_notify_sent = true;
cmd->last_notify_rc = cmd->result.exit_status;
cmd->last_notify_op_status = cmd->result.execution_status;
notify = create_xml_node(NULL, T_LRMD_NOTIFY);
crm_xml_add(notify, F_LRMD_ORIGIN, __func__);
crm_xml_add_int(notify, F_LRMD_TIMEOUT, cmd->timeout);
crm_xml_add_ms(notify, F_LRMD_RSC_INTERVAL, cmd->interval_ms);
crm_xml_add_int(notify, F_LRMD_RSC_START_DELAY, cmd->start_delay);
crm_xml_add_int(notify, F_LRMD_EXEC_RC, cmd->result.exit_status);
crm_xml_add_int(notify, F_LRMD_OP_STATUS, cmd->result.execution_status);
crm_xml_add_int(notify, F_LRMD_CALLID, cmd->call_id);
crm_xml_add_int(notify, F_LRMD_RSC_DELETED, cmd->rsc_deleted);
crm_xml_add_ll(notify, F_LRMD_RSC_RUN_TIME,
(long long) cmd->epoch_last_run);
crm_xml_add_ll(notify, F_LRMD_RSC_RCCHANGE_TIME,
(long long) cmd->epoch_rcchange);
#ifdef PCMK__TIME_USE_CGT
crm_xml_add_int(notify, F_LRMD_RSC_EXEC_TIME, exec_time);
crm_xml_add_int(notify, F_LRMD_RSC_QUEUE_TIME, queue_time);
#endif
crm_xml_add(notify, F_LRMD_OPERATION, LRMD_OP_RSC_EXEC);
crm_xml_add(notify, F_LRMD_RSC_ID, cmd->rsc_id);
if(cmd->real_action) {
crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->real_action);
} else {
crm_xml_add(notify, F_LRMD_RSC_ACTION, cmd->action);
}
crm_xml_add(notify, F_LRMD_RSC_USERDATA_STR, cmd->userdata_str);
crm_xml_add(notify, F_LRMD_RSC_EXIT_REASON, cmd->result.exit_reason);
if (cmd->result.action_stderr != NULL) {
crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->result.action_stderr);
} else if (cmd->result.action_stdout != NULL) {
crm_xml_add(notify, F_LRMD_RSC_OUTPUT, cmd->result.action_stdout);
}
if (cmd->params) {
char *key = NULL;
char *value = NULL;
GHashTableIter iter;
xmlNode *args = create_xml_node(notify, XML_TAG_ATTRS);
g_hash_table_iter_init(&iter, cmd->params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
hash2smartfield((gpointer) key, (gpointer) value, args);
}
}
if ((cmd->client_id != NULL)
&& pcmk_is_set(cmd->call_opts, lrmd_opt_notify_orig_only)) {
pcmk__client_t *client = pcmk__find_client_by_id(cmd->client_id);
if (client != NULL) {
send_client_notify(client->id, client, notify);
}
} else {
pcmk__foreach_ipc_client(send_client_notify, notify);
}
free_xml(notify);
}
static void
send_generic_notify(int rc, xmlNode * request)
{
if (pcmk__ipc_client_count() != 0) {
int call_id = 0;
xmlNode *notify = NULL;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
const char *op = crm_element_value(request, F_LRMD_OPERATION);
crm_element_value_int(request, F_LRMD_CALLID, &call_id);
notify = create_xml_node(NULL, T_LRMD_NOTIFY);
crm_xml_add(notify, F_LRMD_ORIGIN, __func__);
crm_xml_add_int(notify, F_LRMD_RC, rc);
crm_xml_add_int(notify, F_LRMD_CALLID, call_id);
crm_xml_add(notify, F_LRMD_OPERATION, op);
crm_xml_add(notify, F_LRMD_RSC_ID, rsc_id);
pcmk__foreach_ipc_client(send_client_notify, notify);
free_xml(notify);
}
}
static void
cmd_reset(lrmd_cmd_t * cmd)
{
cmd->last_pid = 0;
#ifdef PCMK__TIME_USE_CGT
memset(&cmd->t_run, 0, sizeof(cmd->t_run));
memset(&cmd->t_queue, 0, sizeof(cmd->t_queue));
#endif
cmd->epoch_last_run = 0;
pcmk__reset_result(&(cmd->result));
cmd->result.execution_status = PCMK_EXEC_DONE;
}
static void
cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc)
{
crm_trace("Resource operation rsc:%s action:%s completed (%p %p)", cmd->rsc_id, cmd->action,
rsc ? rsc->active : NULL, cmd);
if (rsc && (rsc->active == cmd)) {
rsc->active = NULL;
mainloop_set_trigger(rsc->work);
}
if (!rsc) {
cmd->rsc_deleted = 1;
}
/* Restore the original timeout so the client notification has correct information */
cmd->timeout = cmd->timeout_orig;
send_cmd_complete_notify(cmd);
if ((cmd->interval_ms != 0)
&& (cmd->result.execution_status == PCMK_EXEC_CANCELLED)) {
if (rsc) {
rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd);
rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd);
}
free_lrmd_cmd(cmd);
} else if (cmd->interval_ms == 0) {
if (rsc) {
rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd);
}
free_lrmd_cmd(cmd);
} else {
/* Clear all the values pertaining just to the last iteration of a recurring op. */
cmd_reset(cmd);
}
}
struct notify_new_client_data {
xmlNode *notify;
pcmk__client_t *new_client;
};
static void
notify_one_client(gpointer key, gpointer value, gpointer user_data)
{
pcmk__client_t *client = value;
struct notify_new_client_data *data = user_data;
if (!pcmk__str_eq(client->id, data->new_client->id, pcmk__str_casei)) {
send_client_notify(key, (gpointer) client, (gpointer) data->notify);
}
}
void
notify_of_new_client(pcmk__client_t *new_client)
{
struct notify_new_client_data data;
data.new_client = new_client;
data.notify = create_xml_node(NULL, T_LRMD_NOTIFY);
crm_xml_add(data.notify, F_LRMD_ORIGIN, __func__);
crm_xml_add(data.notify, F_LRMD_OPERATION, LRMD_OP_NEW_CLIENT);
pcmk__foreach_ipc_client(notify_one_client, &data);
free_xml(data.notify);
}
void
client_disconnect_cleanup(const char *client_id)
{
GHashTableIter iter;
lrmd_rsc_t *rsc = NULL;
char *key = NULL;
g_hash_table_iter_init(&iter, rsc_list);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) {
if (pcmk_all_flags_set(rsc->call_opts, lrmd_opt_drop_recurring)) {
/* This client is disconnecting, drop any recurring operations
* it may have initiated on the resource */
cancel_all_recurring(rsc, client_id);
}
}
}
static void
action_complete(svc_action_t * action)
{
lrmd_rsc_t *rsc;
lrmd_cmd_t *cmd = action->cb_data;
enum ocf_exitcode code;
#ifdef PCMK__TIME_USE_CGT
const char *rclass = NULL;
bool goagain = false;
#endif
if (!cmd) {
crm_err("Completed executor action (%s) does not match any known operations",
action->id);
return;
}
#ifdef PCMK__TIME_USE_CGT
if (cmd->result.exit_status != action->rc) {
cmd->epoch_rcchange = time(NULL);
}
#endif
cmd->last_pid = action->pid;
// Cast variable instead of function return to keep compilers happy
code = services_result2ocf(action->standard, cmd->action, action->rc);
pcmk__set_result(&(cmd->result), (int) code,
action->status, services__exit_reason(action));
rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL;
#ifdef PCMK__TIME_USE_CGT
if (rsc && pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
rclass = resources_find_service_class(rsc->type);
} else if(rsc) {
rclass = rsc->class;
}
if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) {
if (pcmk__result_ok(&(cmd->result))
&& pcmk__strcase_any_of(cmd->action, "start", "stop", NULL)) {
/* systemd returns from start and stop actions after the action
* begins, not after it completes. We have to jump through a few
* hoops so that we don't report 'complete' to the rest of pacemaker
* until it's actually done.
*/
goagain = true;
cmd->real_action = cmd->action;
cmd->action = strdup("monitor");
} else if (cmd->real_action != NULL) {
// This is a follow-up monitor to check whether the start/stop completed
if (cmd->result.execution_status == PCMK_EXEC_PENDING) {
goagain = true;
} else if (pcmk__result_ok(&(cmd->result))
&& pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) {
goagain = true;
} else {
int time_sum = time_diff_ms(NULL, &(cmd->t_first_run));
int timeout_left = cmd->timeout_orig - time_sum;
crm_debug("%s systemd %s is now complete (elapsed=%dms, "
"remaining=%dms): %s (%d)",
cmd->rsc_id, cmd->real_action, time_sum, timeout_left,
services_ocf_exitcode_str(cmd->result.exit_status),
cmd->result.exit_status);
cmd_original_times(cmd);
// Monitors may return "not running", but start/stop shouldn't
if ((cmd->result.execution_status == PCMK_EXEC_DONE)
&& (cmd->result.exit_status == PCMK_OCF_NOT_RUNNING)) {
if (pcmk__str_eq(cmd->real_action, "start", pcmk__str_casei)) {
cmd->result.exit_status = PCMK_OCF_UNKNOWN_ERROR;
} else if (pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) {
cmd->result.exit_status = PCMK_OCF_OK;
}
}
}
}
}
#endif
#if SUPPORT_NAGIOS
if (rsc && pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) {
if (action_matches(cmd, "monitor", 0)
&& pcmk__result_ok(&(cmd->result))) {
/* Successfully executed --version for the nagios plugin */
cmd->result.exit_status = PCMK_OCF_NOT_RUNNING;
} else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)
&& !pcmk__result_ok(&(cmd->result))) {
#ifdef PCMK__TIME_USE_CGT
goagain = true;
#endif
}
}
#endif
#ifdef PCMK__TIME_USE_CGT
if (goagain) {
int time_sum = time_diff_ms(NULL, &(cmd->t_first_run));
int timeout_left = cmd->timeout_orig - time_sum;
int delay = cmd->timeout_orig / 10;
if(delay >= timeout_left && timeout_left > 20) {
delay = timeout_left/2;
}
delay = QB_MIN(2000, delay);
if (delay < timeout_left) {
cmd->start_delay = delay;
cmd->timeout = timeout_left;
if (pcmk__result_ok(&(cmd->result))) {
crm_debug("%s %s may still be in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)",
cmd->rsc_id, cmd->real_action, time_sum, timeout_left, delay);
} else if (cmd->result.execution_status == PCMK_EXEC_PENDING) {
crm_info("%s %s is still in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)",
cmd->rsc_id, cmd->action, time_sum, timeout_left, delay);
} else {
crm_notice("%s %s failed '%s' (%d): re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)",
cmd->rsc_id, cmd->action,
services_ocf_exitcode_str(cmd->result.exit_status),
cmd->result.exit_status, time_sum, timeout_left,
delay);
}
cmd_reset(cmd);
if(rsc) {
rsc->active = NULL;
}
schedule_lrmd_cmd(rsc, cmd);
/* Don't finalize cmd, we're not done with it yet */
return;
} else {
crm_notice("Giving up on %s %s (rc=%d): timeout (elapsed=%dms, remaining=%dms)",
cmd->rsc_id,
(cmd->real_action? cmd->real_action : cmd->action),
cmd->result.exit_status, time_sum, timeout_left);
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_TIMEOUT,
"Investigate reason for timeout, and adjust "
"configured operation timeout if necessary");
cmd_original_times(cmd);
}
}
#endif
pcmk__set_result_output(&(cmd->result), services__grab_stdout(action),
services__grab_stderr(action));
cmd_finalize(cmd, rsc);
}
/*!
* \internal
* \brief Process the result of a fence device action (start, stop, or monitor)
*
* \param[in,out] cmd Fence device action that completed
* \param[in] exit_status Fencer API exit status for action
* \param[in] execution_status Fencer API execution status for action
* \param[in] exit_reason Human-friendly detail, if action failed
*/
static void
stonith_action_complete(lrmd_cmd_t *cmd, int exit_status,
enum pcmk_exec_status execution_status,
const char *exit_reason)
{
// This can be NULL if the resource was removed before the command completed
lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id);
// Map the fencer's exit status to a uniform exit status
if (exit_status != CRM_EX_OK) {
exit_status = PCMK_OCF_UNKNOWN_ERROR;
}
if (cmd->result.execution_status == PCMK_EXEC_CANCELLED) {
/* An in-flight fence action was cancelled. The execution status is
* already correct, so don't overwrite it.
*/
execution_status = PCMK_EXEC_CANCELLED;
} else {
/* Some execution status codes have specific meanings for the fencer
* that executor clients may not expect, so map them to a simple error
* status.
*/
switch (execution_status) {
case PCMK_EXEC_NOT_CONNECTED:
case PCMK_EXEC_INVALID:
execution_status = PCMK_EXEC_ERROR;
break;
case PCMK_EXEC_NO_FENCE_DEVICE:
/* In practice, this should be possible only for probes, but
 * interpret it for all actions to be safe.
 */
if (pcmk__str_eq(cmd->action, CRMD_ACTION_STATUS,
pcmk__str_none)) {
exit_status = PCMK_OCF_NOT_RUNNING;
} else if (pcmk__str_eq(cmd->action, CRMD_ACTION_STOP,
pcmk__str_none)) {
exit_status = PCMK_OCF_OK;
} else {
exit_status = PCMK_OCF_NOT_INSTALLED;
}
execution_status = PCMK_EXEC_ERROR;
break;
case PCMK_EXEC_NOT_SUPPORTED:
exit_status = PCMK_OCF_UNIMPLEMENT_FEATURE;
break;
default:
break;
}
}
pcmk__set_result(&cmd->result, exit_status, execution_status, exit_reason);
// Certain successful actions change the known state of the resource
if ((rsc != NULL) && pcmk__result_ok(&(cmd->result))) {
if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
pcmk__set_result(&rsc->fence_probe_result, CRM_EX_OK,
PCMK_EXEC_DONE, NULL); // "running"
} else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR,
PCMK_EXEC_NO_FENCE_DEVICE, NULL); // "not running"
}
}
/* The recurring timer should not be running at this point in any case, but
* as a failsafe, stop it if it is.
*/
stop_recurring_timer(cmd);
/* Reschedule this command if appropriate. If a recurring command is *not*
* rescheduled, its status must be PCMK_EXEC_CANCELLED, otherwise it will
* not be removed from recurring_ops by cmd_finalize().
*/
if (rsc && (cmd->interval_ms > 0)
&& (cmd->result.execution_status != PCMK_EXEC_CANCELLED)) {
start_recurring_timer(cmd);
}
cmd_finalize(cmd, rsc);
}
static void
lrmd_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data)
{
if ((data == NULL) || (data->userdata == NULL)) {
crm_err("Ignoring fence action result: "
"Invalid callback arguments (bug?)");
} else {
stonith_action_complete((lrmd_cmd_t *) data->userdata,
stonith__exit_status(data),
stonith__execution_status(data),
stonith__exit_reason(data));
}
}
void
stonith_connection_failed(void)
{
GHashTableIter iter;
lrmd_rsc_t *rsc = NULL;
crm_warn("Connection to fencer lost (any pending operations for "
"fence devices will be considered failed)");
g_hash_table_iter_init(&iter, rsc_list);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &rsc)) {
if (!pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH,
pcmk__str_none)) {
continue;
}
/* If we registered this fence device, we don't know whether the
* fencer still has the registration or not. Cause future probes to
* return an error until the resource is stopped or started
* successfully. This is especially important if the controller also
* went away (possibly due to a cluster layer restart) and won't
* receive our client notification of any monitors finalized below.
*/
if (rsc->fence_probe_result.execution_status == PCMK_EXEC_DONE) {
pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR,
PCMK_EXEC_NOT_CONNECTED,
"Lost connection to fencer");
}
// Consider any active, pending, or recurring operations as failed
for (GList *op = rsc->recurring_ops; op != NULL; op = op->next) {
lrmd_cmd_t *cmd = op->data;
/* This won't free a recurring op, but will instead restart its timer.
* If cmd is rsc->active, this will set rsc->active to NULL, so we
* don't have to worry about finalizing it a second time below.
*/
stonith_action_complete(cmd,
CRM_EX_ERROR, PCMK_EXEC_NOT_CONNECTED,
"Lost connection to fencer");
}
if (rsc->active != NULL) {
rsc->pending_ops = g_list_prepend(rsc->pending_ops, rsc->active);
}
while (rsc->pending_ops != NULL) {
// This will free the op and remove it from rsc->pending_ops
stonith_action_complete((lrmd_cmd_t *) rsc->pending_ops->data,
CRM_EX_ERROR, PCMK_EXEC_NOT_CONNECTED,
"Lost connection to fencer");
}
}
}
/*!
* \internal
* \brief Execute a stonith resource "start" action
*
* Start a stonith resource by registering it with the fencer.
* (Stonith agents don't have a start command.)
*
* \param[in,out] stonith_api Connection to fencer
* \param[in] rsc Stonith resource to start
* \param[in] cmd Start command to execute
*
* \return pcmk_ok on success, -errno otherwise
*/
static int
execd_stonith_start(stonith_t *stonith_api, const lrmd_rsc_t *rsc,
const lrmd_cmd_t *cmd)
{
char *key = NULL;
char *value = NULL;
stonith_key_value_t *device_params = NULL;
int rc = pcmk_ok;
// Convert command parameters to stonith API key/values
if (cmd->params) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, cmd->params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
device_params = stonith_key_value_add(device_params, key, value);
}
}
/* The fencer will automatically register devices via CIB notifications
* when the CIB changes, but to avoid a possible race condition between
* the fencer receiving the notification and the executor requesting that
* resource, the executor registers the device as well. The fencer knows how
* to handle duplicate registrations.
*/
rc = stonith_api->cmds->register_device(stonith_api, st_opt_sync_call,
cmd->rsc_id, rsc->provider,
rsc->type, device_params);
stonith_key_value_freeall(device_params, 1, 1);
return rc;
}
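/* For illustration, building a fencer key/value list in isolation; the two
 * libstonithd calls are the ones used above, while the parameter names and
 * values are hypothetical:
 */
static stonith_key_value_t *
example_device_params(void)
{
    stonith_key_value_t *params = NULL;

    params = stonith_key_value_add(params, "pcmk_host_list", "node1 node2");
    params = stonith_key_value_add(params, "ipaddr", "192.0.2.10");
    return params; // caller frees with stonith_key_value_freeall(params, 1, 1)
}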
/*!
* \internal
* \brief Execute a stonith resource "stop" action
*
* Stop a stonith resource by unregistering it with the fencer.
* (Stonith agents don't have a stop command.)
*
* \param[in,out] stonith_api Connection to fencer
* \param[in] rsc Stonith resource to stop
*
* \return pcmk_ok on success, -errno otherwise
*/
static inline int
execd_stonith_stop(stonith_t *stonith_api, const lrmd_rsc_t *rsc)
{
/* @TODO Failure would indicate a problem communicating with fencer;
* perhaps we should try reconnecting and retrying a few times?
*/
return stonith_api->cmds->remove_device(stonith_api, st_opt_sync_call,
rsc->rsc_id);
}
/*!
* \internal
* \brief Initiate a stonith resource agent recurring "monitor" action
*
* \param[in,out] stonith_api Connection to fencer
* \param[in,out] rsc Stonith resource to monitor
* \param[in] cmd Monitor command being executed
*
* \return pcmk_ok if monitor was successfully initiated, -errno otherwise
*/
static inline int
execd_stonith_monitor(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
{
int rc = stonith_api->cmds->monitor(stonith_api, 0, cmd->rsc_id,
cmd->timeout / 1000);
rc = stonith_api->cmds->register_callback(stonith_api, rc, 0, 0, cmd,
"lrmd_stonith_callback",
lrmd_stonith_callback);
if (rc == TRUE) {
rsc->active = cmd;
rc = pcmk_ok;
} else {
rc = -pcmk_err_generic;
}
return rc;
}
static void
execute_stonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
{
int rc = 0;
bool do_monitor = FALSE;
stonith_t *stonith_api = get_stonith_connection();
if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)
&& (cmd->interval_ms == 0)) {
// Probes don't require a fencer connection
stonith_action_complete(cmd, rsc->fence_probe_result.exit_status,
rsc->fence_probe_result.execution_status,
rsc->fence_probe_result.exit_reason);
return;
} else if (stonith_api == NULL) {
stonith_action_complete(cmd, PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_NOT_CONNECTED,
"No connection to fencer");
return;
} else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
rc = execd_stonith_start(stonith_api, rsc, cmd);
if (rc == pcmk_ok) {
do_monitor = TRUE;
}
} else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
rc = execd_stonith_stop(stonith_api, rsc);
} else if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
do_monitor = TRUE;
} else {
stonith_action_complete(cmd, PCMK_OCF_UNIMPLEMENT_FEATURE,
PCMK_EXEC_ERROR,
"Invalid fence device action (bug?)");
return;
}
if (do_monitor) {
rc = execd_stonith_monitor(stonith_api, rsc, cmd);
if (rc == pcmk_ok) {
// Don't clean up yet; we will find out the result of the monitor later
return;
}
}
stonith_action_complete(cmd,
((rc == pcmk_ok)? CRM_EX_OK : CRM_EX_ERROR),
stonith__legacy2status(rc),
((rc == -pcmk_err_generic)? NULL : pcmk_strerror(rc)));
}
static void
execute_nonstonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
{
svc_action_t *action = NULL;
GHashTable *params_copy = NULL;
CRM_ASSERT(rsc);
CRM_ASSERT(cmd);
crm_trace("Creating action, resource:%s action:%s class:%s provider:%s agent:%s",
rsc->rsc_id, cmd->action, rsc->class, rsc->provider, rsc->type);
#if SUPPORT_NAGIOS
/* Recurring operations are cancelled anyway for a stop operation */
if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)
&& pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
cmd->result.exit_status = PCMK_OCF_OK;
cmd_finalize(cmd, rsc);
return;
}
#endif
params_copy = pcmk__str_table_dup(cmd->params);
action = services__create_resource_action(rsc->rsc_id, rsc->class, rsc->provider,
rsc->type,
normalize_action_name(rsc, cmd->action),
cmd->interval_ms, cmd->timeout,
params_copy, cmd->service_flags);
if (action == NULL) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
PCMK_EXEC_ERROR, strerror(ENOMEM));
cmd_finalize(cmd, rsc);
return;
}
if (action->rc != PCMK_OCF_UNKNOWN) {
pcmk__set_result(&(cmd->result), action->rc, action->status,
services__exit_reason(action));
services_action_free(action);
cmd_finalize(cmd, rsc);
return;
}
action->cb_data = cmd;
if (services_action_async(action, action_complete)) {
/* The services library has taken responsibility for the action. It
* could be pending, blocked, or merged into a duplicate recurring
* action, in which case the action callback (action_complete())
* will be called when the action completes, otherwise the callback has
* already been called.
*
* action_complete() calls cmd_finalize() which can free cmd, so cmd
* cannot be used here.
*/
} else {
/* This is a recurring action that is not being cancelled and could not
* be initiated. It has been rescheduled, and the action callback
* (action_complete()) has been called, which in this case has already
* called cmd_finalize(), which in this case should only reset (not
* free) cmd.
*/
pcmk__set_result(&(cmd->result), action->rc, action->status,
services__exit_reason(action));
services_action_free(action);
}
}
static gboolean
execute_resource_action(gpointer user_data)
{
lrmd_rsc_t *rsc = (lrmd_rsc_t *) user_data;
lrmd_cmd_t *cmd = NULL;
CRM_CHECK(rsc != NULL, return FALSE);
if (rsc->active) {
crm_trace("%s is still active", rsc->rsc_id);
return TRUE;
}
if (rsc->pending_ops) {
GList *first = rsc->pending_ops;
cmd = first->data;
if (cmd->delay_id) {
crm_trace
("Command %s %s was asked to run too early, waiting for start_delay timeout of %dms",
cmd->rsc_id, cmd->action, cmd->start_delay);
return TRUE;
}
rsc->pending_ops = g_list_remove_link(rsc->pending_ops, first);
g_list_free_1(first);
#ifdef PCMK__TIME_USE_CGT
get_current_time(&(cmd->t_run), &(cmd->t_first_run));
#endif
cmd->epoch_last_run = time(NULL);
}
if (!cmd) {
crm_trace("Nothing further to do for %s", rsc->rsc_id);
return TRUE;
}
rsc->active = cmd; /* only one op at a time per resource */
if (cmd->interval_ms) {
rsc->recurring_ops = g_list_append(rsc->recurring_ops, cmd);
}
log_execute(cmd);
if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
execute_stonith_action(rsc, cmd);
} else {
execute_nonstonith_action(rsc, cmd);
}
return TRUE;
}
void
free_rsc(gpointer data)
{
GList *gIter = NULL;
lrmd_rsc_t *rsc = data;
int is_stonith = pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH,
pcmk__str_casei);
gIter = rsc->pending_ops;
while (gIter != NULL) {
GList *next = gIter->next;
lrmd_cmd_t *cmd = gIter->data;
/* command was never executed */
cmd->result.execution_status = PCMK_EXEC_CANCELLED;
cmd_finalize(cmd, NULL);
gIter = next;
}
/* frees list, but not list elements. */
g_list_free(rsc->pending_ops);
gIter = rsc->recurring_ops;
while (gIter != NULL) {
GList *next = gIter->next;
lrmd_cmd_t *cmd = gIter->data;
if (is_stonith) {
cmd->result.execution_status = PCMK_EXEC_CANCELLED;
/* If a stonith command is in-flight, just mark it as cancelled;
* it is not safe to finalize/free the cmd until the stonith API
* says it has either completed or timed out.
*/
if (rsc->active != cmd) {
cmd_finalize(cmd, NULL);
}
} else {
/* This command has already been handed off to the service library;
 * let the service library cancel it and tell us via the callback
 * when the cancellation completes. The rsc can safely be destroyed
 * even while we wait for the cancel result */
services_action_cancel(rsc->rsc_id,
normalize_action_name(rsc, cmd->action),
cmd->interval_ms);
}
gIter = next;
}
/* frees list, but not list elements. */
g_list_free(rsc->recurring_ops);
free(rsc->rsc_id);
free(rsc->class);
free(rsc->provider);
free(rsc->type);
mainloop_destroy_trigger(rsc->work);
free(rsc);
}
static int
process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id,
xmlNode **reply)
{
int rc = pcmk_ok;
time_t now = time(NULL);
const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION);
+ const char *start_state = pcmk__env_option(PCMK__ENV_NODE_START_STATE);
if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) {
crm_err("Cluster API version must be greater than or equal to %s, not %s",
LRMD_MIN_PROTOCOL_VERSION, protocol_version);
rc = -EPROTO;
}
if (pcmk__xe_attr_is_true(request, F_LRMD_IS_IPC_PROVIDER)) {
#ifdef PCMK__COMPILE_REMOTE
if ((client->remote != NULL)
&& pcmk_is_set(client->flags,
pcmk__client_tls_handshake_complete)) {
// This is a remote connection from a cluster node's controller
ipc_proxy_add_provider(client);
} else {
rc = -EACCES;
}
#else
rc = -EPROTONOSUPPORT;
#endif
}
*reply = create_lrmd_reply(__func__, rc, call_id);
crm_xml_add(*reply, F_LRMD_OPERATION, CRM_OP_REGISTER);
crm_xml_add(*reply, F_LRMD_CLIENTID, client->id);
crm_xml_add(*reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION);
crm_xml_add_ll(*reply, PCMK__XA_UPTIME, now - start_time);
+ if (start_state) {
+ crm_xml_add(*reply, PCMK__XA_NODE_START_STATE, start_state);
+ }
+
return rc;
}
static int
process_lrmd_rsc_register(pcmk__client_t *client, uint32_t id, xmlNode *request)
{
int rc = pcmk_ok;
lrmd_rsc_t *rsc = build_rsc_from_xml(request);
lrmd_rsc_t *dup = g_hash_table_lookup(rsc_list, rsc->rsc_id);
if (dup &&
pcmk__str_eq(rsc->class, dup->class, pcmk__str_casei) &&
pcmk__str_eq(rsc->provider, dup->provider, pcmk__str_casei) && pcmk__str_eq(rsc->type, dup->type, pcmk__str_casei)) {
crm_notice("Ignoring duplicate registration of '%s'", rsc->rsc_id);
free_rsc(rsc);
return rc;
}
g_hash_table_replace(rsc_list, rsc->rsc_id, rsc);
crm_info("Cached agent information for '%s'", rsc->rsc_id);
return rc;
}
static xmlNode *
process_lrmd_get_rsc_info(xmlNode *request, int call_id)
{
int rc = pcmk_ok;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
xmlNode *reply = NULL;
lrmd_rsc_t *rsc = NULL;
if (rsc_id == NULL) {
rc = -ENODEV;
} else {
rsc = g_hash_table_lookup(rsc_list, rsc_id);
if (rsc == NULL) {
crm_info("Agent information for '%s' not in cache", rsc_id);
rc = -ENODEV;
}
}
reply = create_lrmd_reply(__func__, rc, call_id);
if (rsc) {
crm_xml_add(reply, F_LRMD_RSC_ID, rsc->rsc_id);
crm_xml_add(reply, F_LRMD_CLASS, rsc->class);
crm_xml_add(reply, F_LRMD_PROVIDER, rsc->provider);
crm_xml_add(reply, F_LRMD_TYPE, rsc->type);
}
return reply;
}
static int
process_lrmd_rsc_unregister(pcmk__client_t *client, uint32_t id,
xmlNode *request)
{
int rc = pcmk_ok;
lrmd_rsc_t *rsc = NULL;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
if (!rsc_id) {
return -ENODEV;
}
rsc = g_hash_table_lookup(rsc_list, rsc_id);
if (rsc == NULL) {
crm_info("Ignoring unregistration of resource '%s', which is not registered",
rsc_id);
return pcmk_ok;
}
if (rsc->active) {
/* let the caller know there are still active ops on this rsc to watch for */
crm_trace("Operation (%p) still in progress for unregistered resource %s",
rsc->active, rsc_id);
rc = -EINPROGRESS;
}
g_hash_table_remove(rsc_list, rsc_id);
return rc;
}
static int
process_lrmd_rsc_exec(pcmk__client_t *client, uint32_t id, xmlNode *request)
{
lrmd_rsc_t *rsc = NULL;
lrmd_cmd_t *cmd = NULL;
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
int call_id;
if (!rsc_id) {
return -EINVAL;
}
if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) {
crm_info("Resource '%s' not found (%d active resources)",
rsc_id, g_hash_table_size(rsc_list));
return -ENODEV;
}
cmd = create_lrmd_cmd(request, client);
call_id = cmd->call_id;
/* Don't reference cmd after handing it off to be scheduled.
* The cmd could get merged and freed. */
schedule_lrmd_cmd(rsc, cmd);
return call_id;
}
static int
cancel_op(const char *rsc_id, const char *action, guint interval_ms)
{
GList *gIter = NULL;
lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, rsc_id);
/* How to cancel an action:
 * 1. Check the pending ops list. If the action hasn't yet been handed off
 *    to the service library or the stonith recurring list, remove it
 *    there, and that will stop it.
 * 2. If it isn't in the pending ops list, then it's either a recurring op
 *    in the stonith recurring list or one in the service library's
 *    recurring list. Stop it there.
 * 3. If it isn't found in any list, then this operation either has been
 *    executed already (and is not a recurring operation) or never existed.
 */
if (!rsc) {
return -ENODEV;
}
for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) {
lrmd_cmd_t *cmd = gIter->data;
if (action_matches(cmd, action, interval_ms)) {
cmd->result.execution_status = PCMK_EXEC_CANCELLED;
cmd_finalize(cmd, rsc);
return pcmk_ok;
}
}
if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
/* The service library does not handle stonith operations.
* We have to handle recurring stonith operations ourselves. */
for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) {
lrmd_cmd_t *cmd = gIter->data;
if (action_matches(cmd, action, interval_ms)) {
cmd->result.execution_status = PCMK_EXEC_CANCELLED;
if (rsc->active != cmd) {
cmd_finalize(cmd, rsc);
}
return pcmk_ok;
}
}
} else if (services_action_cancel(rsc_id,
normalize_action_name(rsc, action),
interval_ms) == TRUE) {
/* The service library will tell the action_complete callback function
* this action was cancelled, which will destroy the cmd and remove
* it from the recurring_op list. Do not do that in this function
* if the service library says it cancelled it. */
return pcmk_ok;
}
return -EOPNOTSUPP;
}
static void
cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id)
{
GList *cmd_list = NULL;
GList *cmd_iter = NULL;
/* Note that a copy of each list is created by g_list_copy() before the
 * lists are concatenated. This matters because cancel_op() may modify the
 * recurring_ops and pending_ops lists while cmd_list is being iterated;
 * without the copies, that could invalidate our iteration. */
if (rsc->recurring_ops) {
cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->recurring_ops));
}
if (rsc->pending_ops) {
cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->pending_ops));
}
if (!cmd_list) {
return;
}
for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) {
lrmd_cmd_t *cmd = cmd_iter->data;
if (cmd->interval_ms == 0) {
continue;
}
if (client_id && !pcmk__str_eq(cmd->client_id, client_id, pcmk__str_casei)) {
continue;
}
cancel_op(rsc->rsc_id, cmd->action, cmd->interval_ms);
}
/* frees only the copied list nodes, not the cmds themselves */
g_list_free(cmd_list);
}
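/* The copy-then-iterate pattern above, reduced to a hypothetical minimum:
 * walk a snapshot so that callbacks may safely mutate the live list.
 */
static void
walk_snapshot(GList *live_list, GFunc fn, gpointer user_data)
{
    GList *snapshot = g_list_copy(live_list); // shallow copy of the nodes

    for (GList *iter = snapshot; iter != NULL; iter = iter->next) {
        fn(iter->data, user_data); // may modify live_list without harm
    }
    g_list_free(snapshot); // frees only the copied nodes, not the data
}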
static int
process_lrmd_rsc_cancel(pcmk__client_t *client, uint32_t id, xmlNode *request)
{
xmlNode *rsc_xml = get_xpath_object("//" F_LRMD_RSC, request, LOG_ERR);
const char *rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
const char *action = crm_element_value(rsc_xml, F_LRMD_RSC_ACTION);
guint interval_ms = 0;
crm_element_value_ms(rsc_xml, F_LRMD_RSC_INTERVAL, &interval_ms);
if (!rsc_id || !action) {
return -EINVAL;
}
return cancel_op(rsc_id, action, interval_ms);
}
static void
add_recurring_op_xml(xmlNode *reply, lrmd_rsc_t *rsc)
{
xmlNode *rsc_xml = create_xml_node(reply, F_LRMD_RSC);
crm_xml_add(rsc_xml, F_LRMD_RSC_ID, rsc->rsc_id);
for (GList *item = rsc->recurring_ops; item != NULL; item = item->next) {
lrmd_cmd_t *cmd = item->data;
xmlNode *op_xml = create_xml_node(rsc_xml, T_LRMD_RSC_OP);
crm_xml_add(op_xml, F_LRMD_RSC_ACTION,
(cmd->real_action? cmd->real_action : cmd->action));
crm_xml_add_ms(op_xml, F_LRMD_RSC_INTERVAL, cmd->interval_ms);
crm_xml_add_int(op_xml, F_LRMD_TIMEOUT, cmd->timeout_orig);
}
}
static xmlNode *
process_lrmd_get_recurring(xmlNode *request, int call_id)
{
int rc = pcmk_ok;
const char *rsc_id = NULL;
lrmd_rsc_t *rsc = NULL;
xmlNode *reply = NULL;
xmlNode *rsc_xml = NULL;
// Resource ID is optional
rsc_xml = first_named_child(request, F_LRMD_CALLDATA);
if (rsc_xml) {
rsc_xml = first_named_child(rsc_xml, F_LRMD_RSC);
}
if (rsc_xml) {
rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
}
// If resource ID is specified, resource must exist
if (rsc_id != NULL) {
rsc = g_hash_table_lookup(rsc_list, rsc_id);
if (rsc == NULL) {
crm_info("Resource '%s' not found (%d active resources)",
rsc_id, g_hash_table_size(rsc_list));
rc = -ENODEV;
}
}
reply = create_lrmd_reply(__func__, rc, call_id);
// If resource ID is not specified, check all resources
if (rsc_id == NULL) {
GHashTableIter iter;
char *key = NULL;
g_hash_table_iter_init(&iter, rsc_list);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &rsc)) {
add_recurring_op_xml(reply, rsc);
}
} else if (rsc) {
add_recurring_op_xml(reply, rsc);
}
return reply;
}
void
process_lrmd_message(pcmk__client_t *client, uint32_t id, xmlNode *request)
{
int rc = pcmk_ok;
int call_id = 0;
const char *op = crm_element_value(request, F_LRMD_OPERATION);
int do_reply = 0;
int do_notify = 0;
xmlNode *reply = NULL;
/* Certain IPC commands may be done only by privileged users (i.e. root or
* hacluster), because they would otherwise provide a means of bypassing
* ACLs.
*/
bool allowed = pcmk_is_set(client->flags, pcmk__client_privileged);
crm_trace("Processing %s operation from %s", op, client->id);
crm_element_value_int(request, F_LRMD_CALLID, &call_id);
if (pcmk__str_eq(op, CRM_OP_IPC_FWD, pcmk__str_none)) {
#ifdef PCMK__COMPILE_REMOTE
if (allowed) {
ipc_proxy_forward_client(client, request);
} else {
rc = -EACCES;
}
#else
rc = -EPROTONOSUPPORT;
#endif
do_reply = 1;
} else if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) {
rc = process_lrmd_signon(client, request, call_id, &reply);
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_RSC_REG, pcmk__str_none)) {
if (allowed) {
rc = process_lrmd_rsc_register(client, id, request);
do_notify = 1;
} else {
rc = -EACCES;
}
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_RSC_INFO, pcmk__str_none)) {
if (allowed) {
reply = process_lrmd_get_rsc_info(request, call_id);
} else {
rc = -EACCES;
}
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_RSC_UNREG, pcmk__str_none)) {
if (allowed) {
rc = process_lrmd_rsc_unregister(client, id, request);
/* don't notify anyone about failed unregistrations */
if (rc == pcmk_ok || rc == -EINPROGRESS) {
do_notify = 1;
}
} else {
rc = -EACCES;
}
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_RSC_EXEC, pcmk__str_none)) {
if (allowed) {
rc = process_lrmd_rsc_exec(client, id, request);
} else {
rc = -EACCES;
}
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_RSC_CANCEL, pcmk__str_none)) {
if (allowed) {
rc = process_lrmd_rsc_cancel(client, id, request);
} else {
rc = -EACCES;
}
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_POKE, pcmk__str_none)) {
do_notify = 1;
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_CHECK, pcmk__str_none)) {
if (allowed) {
xmlNode *data = get_message_xml(request, F_LRMD_CALLDATA);
CRM_LOG_ASSERT(data != NULL);
pcmk__valid_sbd_timeout(crm_element_value(data, F_LRMD_WATCHDOG));
} else {
rc = -EACCES;
}
} else if (pcmk__str_eq(op, LRMD_OP_ALERT_EXEC, pcmk__str_none)) {
if (allowed) {
rc = process_lrmd_alert_exec(client, id, request);
} else {
rc = -EACCES;
}
do_reply = 1;
} else if (pcmk__str_eq(op, LRMD_OP_GET_RECURRING, pcmk__str_none)) {
if (allowed) {
reply = process_lrmd_get_recurring(request, call_id);
} else {
rc = -EACCES;
}
do_reply = 1;
} else {
rc = -EOPNOTSUPP;
do_reply = 1;
crm_err("Unknown IPC request '%s' from client %s",
op, pcmk__client_name(client));
}
if (rc == -EACCES) {
crm_warn("Rejecting IPC request '%s' from unprivileged client %s",
op, pcmk__client_name(client));
}
crm_debug("Processed %s operation from %s: rc=%d, reply=%d, notify=%d",
op, client->id, rc, do_reply, do_notify);
if (do_reply) {
int send_rc = pcmk_rc_ok;
if (reply == NULL) {
reply = create_lrmd_reply(__func__, rc, call_id);
}
send_rc = lrmd_server_send_reply(client, id, reply);
free_xml(reply);
if (send_rc != pcmk_rc_ok) {
crm_warn("Reply to client %s failed: %s " CRM_XS " rc=%d",
pcmk__client_name(client), pcmk_rc_str(send_rc), send_rc);
}
}
if (do_notify) {
send_generic_notify(rc, request);
}
}
diff --git a/etc/sysconfig/pacemaker.in b/etc/sysconfig/pacemaker.in
index 3b03ad6719..041da7195f 100644
--- a/etc/sysconfig/pacemaker.in
+++ b/etc/sysconfig/pacemaker.in
@@ -1,339 +1,338 @@
#
# Pacemaker start-up configuration
#
# This file contains environment variables that affect Pacemaker behavior.
# They are not options stored in the Cluster Information Base (CIB) because
# they may be needed before the CIB is available.
#
## Logging
# PCMK_logfacility
#
# Enable logging via the system log or journal, using the specified log
# facility. Messages sent here are of value to all Pacemaker administrators.
# This can be disabled using "none", but that is not recommended. Allowed
# values:
#
# none
# daemon
# user
# local0
# local1
# local2
# local3
# local4
# local5
# local6
# local7
#
# Default: PCMK_logfacility="daemon"
# PCMK_logpriority
#
# Unless system logging is disabled using PCMK_logfacility=none, messages of
# the specified log severity and higher will be sent to the system log. The
# default is appropriate for most installations. Allowed values:
#
# emerg
# alert
# crit
# error
# warning
# notice
# info
# debug
#
# Default: PCMK_logpriority="notice"
# PCMK_logfile
#
# Unless set to "none", more detailed log messages will be sent to the
# specified file (in addition to the system log, if enabled). These messages
# may have extended information, and will include messages of info severity.
# This log is of more use to developers and advanced system administrators, and
# when reporting problems.
#
# Default: PCMK_logfile="@CRM_LOG_DIR@/pacemaker.log"
# PCMK_logfile_mode
#
# Pacemaker will set the permissions on the detail log to this value (see
# chmod(1)).
#
# Default: PCMK_logfile_mode="0660"
# PCMK_debug (Advanced Use Only)
#
# Whether to send debug severity messages to the detail log.
# This may be set for all subsystems (yes or no) or for specific
# (comma-separated) subsystems. Allowed subsystems are:
#
# pacemakerd
# pacemaker-attrd
# pacemaker-based
# pacemaker-controld
# pacemaker-execd
# pacemaker-fenced
# pacemaker-schedulerd
#
# Default: PCMK_debug="no"
# Example: PCMK_debug="pacemakerd,pacemaker-execd"
# PCMK_trace_functions (Advanced Use Only)
#
# Send debug and trace severity messages from these (comma-separated)
# source code functions to the detail log.
#
# Default: PCMK_trace_functions=""
# Example: PCMK_trace_functions="unpack_colocation_set,pcmk__cmp_instance"
# PCMK_trace_files (Advanced Use Only)
#
# Send debug and trace severity messages from all functions in these
# (comma-separated) source file names to the detail log.
#
# Default: PCMK_trace_files=""
# Example: PCMK_trace_files="remote.c,watchdog.c"
# PCMK_trace_formats (Advanced Use Only)
#
# Send trace severity messages that are generated by these (comma-separated)
# format strings in the source code to the detail log.
#
# Default: PCMK_trace_formats=""
# Example: PCMK_trace_formats="TLS handshake failed: %s (%d)"
# PCMK_trace_tags (Advanced Use Only)
#
# Send debug and trace severity messages related to these (comma-separated)
# resource IDs to the detail log.
#
# Default: PCMK_trace_tags=""
# Example: PCMK_trace_tags="client-ip,dbfs"
# PCMK_blackbox (Advanced Use Only)
#
# Enable blackbox logging globally (yes or no) or by subsystem. A blackbox
# contains a rolling buffer of all logs (of all severities). Blackboxes are
# stored under @CRM_BLACKBOX_DIR@ by default, and their contents can
# be viewed using the qb-blackbox(8) command.
#
# The blackbox recorder can be enabled at start using this variable, or at
# runtime by sending a Pacemaker subsystem daemon process a SIGUSR1 or SIGTRAP
# signal, and disabled by sending SIGUSR2 (see kill(1)). The blackbox will be
# written after a crash, assertion failure, or SIGTRAP signal.
#
# Default: PCMK_blackbox="no"
# Example: PCMK_blackbox="pacemaker-controld,pacemaker-fenced"
# PCMK_trace_blackbox (Advanced Use Only)
#
# Write a blackbox whenever the message at the specified function and line is
# logged. Multiple entries may be comma-separated.
#
# Default: PCMK_trace_blackbox=""
# Example: PCMK_trace_blackbox="remote.c:144,remote.c:149"
## Node start state
# PCMK_node_start_state
#
# By default, the local host will join the cluster in an online or standby
# state when Pacemaker first starts, depending on whether it was previously put
# into standby mode. If this variable is set to "standby" or "online", it will
-# force the local host to join in the specified state. This has no effect on
-# Pacemaker Remote nodes.
+# force the local host to join in the specified state.
#
# Default: PCMK_node_start_state="default"
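# Example: PCMK_node_start_state="standby"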
## Crash Handling
# PCMK_fail_fast
#
# By default, if a Pacemaker subsystem crashes, the main pacemakerd process
# will attempt to restart it. If this variable is set to "yes", pacemakerd
# will panic the local host instead.
#
# Default: PCMK_fail_fast="no"
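# Example: PCMK_fail_fast="yes"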
# PCMK_panic_action
#
# Pacemaker will panic the local host under certain conditions. By default,
# this means rebooting the host. This variable can change that behavior: if
# "crash", trigger a kernel crash (useful if you want a kernel dump to
# investigate); if "sync-reboot" or "sync-crash", synchronize filesystems
# before rebooting the host or triggering a kernel crash. The sync values are
# more likely to preserve log messages, but with the risk that the host may be
# left active if the synchronization hangs.
#
# Default: PCMK_panic_action="reboot"
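# Example: PCMK_panic_action="sync-reboot"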
## Pacemaker Remote
# PCMK_authkey_location
#
# Use the contents of this file as the authorization key to use with Pacemaker
# Remote connections. This file must be readable by Pacemaker daemons (that is,
# it must allow read permissions to either the hacluster user or the haclient
# group), and its contents must be identical on all nodes.
#
# Default: PCMK_authkey_location="@PACEMAKER_CONFIG_DIR@/authkey"
# PCMK_remote_address
#
# By default, if the Pacemaker Remote service is run on the local node, it will
# listen for connections on all IP addresses. This may be set to one address to
# listen on instead, as a resolvable hostname or as a numeric IPv4 or IPv6
# address. When resolving names or listening on all addresses, IPv6 will be
# preferred if available. When listening on an IPv6 address, IPv4 clients will
# be supported via IPv4-mapped IPv6 addresses.
#
# Default: PCMK_remote_address=""
# Example: PCMK_remote_address="192.0.2.1"
# PCMK_remote_port
#
# Use this TCP port number for Pacemaker Remote node connections. This value
# must be the same on all nodes.
#
# Default: PCMK_remote_port="3121"
# PCMK_tls_priorities (Advanced Use Only)
#
# These GnuTLS cipher priorities will be used for TLS connections (whether for
# Pacemaker Remote connections or remote CIB access, when enabled). See:
#
# https://gnutls.org/manual/html_node/Priority-Strings.html
#
# Pacemaker will append ":+ANON-DH" for remote CIB access and ":+DHE-PSK:+PSK"
# for Pacemaker Remote connections, as they are required for the respective
# functionality.
#
# Default: PCMK_tls_priorities="@PCMK_GNUTLS_PRIORITIES@"
# Example: PCMK_tls_priorities="SECURE128:+SECURE192:-VERS-ALL:+VERS-TLS1.2"
# PCMK_dh_min_bits (Advanced Use Only)
#
# Set a lower bound on the bit length of the prime number generated for
# Diffie-Hellman parameters needed by TLS connections. The default is no
# minimum.
#
# The server (Pacemaker Remote daemon, or CIB manager configured to accept
# remote clients) will use this value to provide a floor for the value
# recommended by the GnuTLS library. The library will only accept a limited
# number of specific values, which vary by library version, so setting these is
# recommended only when required for compatibility with specific client
# versions.
#
# Clients (connecting cluster nodes or remote CIB commands) will require that
# the server use a prime of at least this size. This is recommended only when
# the value must be lowered in order for the client's GnuTLS library to accept
# a connection to an older server.
#
# Default: PCMK_dh_min_bits="1024"
# PCMK_dh_max_bits (Advanced Use Only)
#
# Set an upper bound on the bit length of the prime number generated for
# Diffie-Hellman parameters needed by TLS connections.
#
# The server (Pacemaker Remote daemon, or CIB manager configured to accept
# remote clients) will use this value to provide a ceiling for the value
# recommended by the GnuTLS library. The library will only accept a limited
# number of specific values, which vary by library version, so setting these is
# recommended only when required for compatibility with specific client
# versions.
#
# Clients do not use PCMK_dh_max_bits.
#
# Default: PCMK_dh_max_bits="2048"
## Inter-process Communication
# PCMK_ipc_type (Advanced Use Only)
#
# Force use of a particular IPC method. Allowed values:
#
# shared-mem
# socket
# posix
# sysv
#
# Default: PCMK_ipc_type="shared-mem"
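# Example: PCMK_ipc_type="socket"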
# PCMK_ipc_buffer (Advanced Use Only)
#
# Specify an IPC buffer size in bytes. This can be useful when connecting to
# large clusters that produce messages exceeding the default size (which will
# also result in log messages referencing this variable).
#
# Default: PCMK_ipc_buffer="131072"
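# Example: PCMK_ipc_buffer="262144"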
## Developer Options
# PCMK_schema_directory (Advanced Use Only)
#
# Specify an alternate location for RNG schemas and XSL transforms.
#
# Default: PCMK_schema_directory="@CRM_SCHEMA_DIRECTORY@"
# G_SLICE (Advanced Use Only)
#
# Affect the behavior of glib's memory allocator. Setting to "always-malloc"
# when running under valgrind will help valgrind track malloc/free better;
# setting to "debug-blocks" when not running under valgrind will perform
# (somewhat expensive) memory checks.
#
# Default: G_SLICE=""
# Example: G_SLICE="always-malloc"
# MALLOC_PERTURB_ (Advanced Use Only)
#
# Setting this to a decimal byte value will make malloc() initialize newly
# allocated memory and free() wipe it, to help catch uninitialized-memory and
# use-after-free bugs.
#
# Default: MALLOC_PERTURB_=""
# Example: MALLOC_PERTURB_="221"
# MALLOC_CHECK_ (Advanced Use Only)
#
# Setting this to 3 will make malloc() and friends print to stderr and abort
# for some (inexpensive) memory checks.
#
# Default: MALLOC_CHECK_=""
# Example: MALLOC_CHECK_="3"
# PCMK_valgrind_enabled (Advanced Use Only)
#
# Whether subsystem daemons should be run under valgrind. Allowed values are
# the same as for PCMK_debug.
#
# Default: PCMK_valgrind_enabled="no"
# PCMK_callgrind_enabled
#
# Whether subsystem daemons should be run under valgrind with the callgrind
# tool enabled. Allowed values are the same as for PCMK_debug.
#
# Default: PCMK_callgrind_enabled="no"
# VALGRIND_OPTS
#
# Pass these options to valgrind, when enabled (see valgrind(1)). "--vgdb=no"
# is specified because pacemaker-execd can lower privileges when executing
# commands, which would otherwise leave a bunch of unremovable files in /tmp.
#
# Default: VALGRIND_OPTS=""
VALGRIND_OPTS="--leak-check=full --trace-children=no --vgdb=no --num-callers=25"
VALGRIND_OPTS="$VALGRIND_OPTS --log-file=@CRM_PACEMAKER_DIR@/valgrind-%p"
VALGRIND_OPTS="$VALGRIND_OPTS --suppressions=@datadir@/pacemaker/tests/valgrind-pcmk.suppressions"
VALGRIND_OPTS="$VALGRIND_OPTS --gen-suppressions=all"
diff --git a/include/crm/common/ipc_internal.h b/include/crm/common/ipc_internal.h
index 5099dda889..d20392495b 100644
--- a/include/crm/common/ipc_internal.h
+++ b/include/crm/common/ipc_internal.h
@@ -1,293 +1,294 @@
/*
* Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__IPC_INTERNAL_H
#define PCMK__IPC_INTERNAL_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h> // bool
#include <stdint.h> // uint32_t, uint64_t, UINT64_C()
#include <sys/uio.h> // struct iovec
#include <sys/types.h> // uid_t, gid_t, pid_t, size_t
#ifdef HAVE_GNUTLS_GNUTLS_H
# include <gnutls/gnutls.h> // gnutls_session_t
#endif
#include <glib.h> // guint, gpointer, GQueue, ...
#include <libxml/tree.h> // xmlNode
#include <qb/qbipcs.h> // qb_ipcs_connection_t, ...
#include <crm_config.h> // HAVE_GETPEEREID
#include <crm/common/ipc.h>
#include <crm/common/ipc_controld.h> // pcmk_controld_api_reply
#include <crm/common/ipc_pacemakerd.h> // pcmk_pacemakerd_{api_reply,state}
#include <crm/common/mainloop.h> // mainloop_io_t
/*
* XML attribute names used only by internal code
*/
#define PCMK__XA_IPC_PROTO_VERSION "ipc-protocol-version"
/* denotes "non yieldable PID" on FreeBSD, or actual PID1 in scenarios that
require a delicate handling anyway (socket-based activation with systemd);
we can be reasonably sure that this PID is never possessed by the actual
child daemon, as it gets taken either by the proper init, or by pacemakerd
itself (i.e. this precludes anything else); note that value of zero
is meant to carry "unset" meaning, and better not to bet on/conditionalize
over signedness of pid_t */
#define PCMK__SPECIAL_PID 1
// Timeout (in seconds) to use for IPC client sends, reply waits, etc.
#define PCMK__IPC_TIMEOUT 120
#if defined(HAVE_GETPEEREID)
/* On FreeBSD, we don't want to expose the "non-yieldable PID" (which implies
"IPC liveness check only") as its nominal representation, as that could
cause confusion -- this is unambiguous as long as there's no
socket-based activation like with systemd (very improbable) */
#define PCMK__SPECIAL_PID_AS_0(p) (((p) == PCMK__SPECIAL_PID) ? 0 : (p))
#else
#define PCMK__SPECIAL_PID_AS_0(p) (p)
#endif
/*!
* \internal
* \brief Check the authenticity and liveness of the process behind an IPC
*        endpoint
*
* When an IPC daemon is detected at the given IPC endpoint (name), its
* authenticity is verified by comparing its credentials against the provided
* reference UID and GID, and the result of the check can be deduced from the
* return value. As an exception, a reference UID of 0 (root) matches any
* detected daemon's credentials.
*
* \param[in]  name    IPC name to base the search on
* \param[in]  refuid  Reference UID to check against
* \param[in]  refgid  Reference GID to check against
* \param[out] gotpid  If not NULL, where to store the PID of the found
*                     process (not available on FreeBSD, where the special
*                     value 1, PCMK__SPECIAL_PID, is used instead, and the
*                     caller is required to special-case that value)
*
* \return Standard Pacemaker return code
*
* \note Return codes of particular interest include pcmk_rc_ipc_unresponsive,
*       indicating that no trace of IPC liveness was detected, and
*       pcmk_rc_ipc_unauthorized, indicating that the IPC endpoint is blocked
*       by an unauthorized process.
* \note This function emits a log message for return codes other than
*       pcmk_rc_ok and pcmk_rc_ipc_unresponsive, and when there isn't a
*       perfect match with respect to \p refuid and/or \p refgid, for a
*       possible least-privilege-principle violation.
*
* \see crm_ipc_is_authentic_process
*/
int pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid,
gid_t refgid, pid_t *gotpid);
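/* A minimal usage sketch (illustrative only; the endpoint name and the
 * surrounding error handling are assumptions, not taken from this header):
 *
 *     pid_t found = 0;
 *     int rc = pcmk__ipc_is_authentic_process_active("pacemakerd", 0, 0,
 *                                                    &found);
 *     if (rc == pcmk_rc_ok) {
 *         // A live, authentic daemon was detected; on FreeBSD, found may
 *         // be PCMK__SPECIAL_PID rather than a real PID
 *     } else if (rc == pcmk_rc_ipc_unresponsive) {
 *         // No trace of IPC liveness at this endpoint
 *     }
 */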
/*
* Server-related
*/
typedef struct pcmk__client_s pcmk__client_t;
struct pcmk__remote_s {
/* Shared */
char *buffer;
size_t buffer_size;
size_t buffer_offset;
int auth_timeout;
int tcp_socket;
mainloop_io_t *source;
time_t uptime;
+ char *start_state;
/* CIB-only */
char *token;
/* TLS only */
# ifdef HAVE_GNUTLS_GNUTLS_H
gnutls_session_t *tls_session;
# endif
};
enum pcmk__client_flags {
// Lower 32 bits are reserved for server (not library) use
// Next 8 bits are reserved for client type (sort of a cheap enum)
//! Client uses plain IPC
pcmk__client_ipc = (UINT64_C(1) << 32),
//! Client uses TCP connection
pcmk__client_tcp = (UINT64_C(1) << 33),
# ifdef HAVE_GNUTLS_GNUTLS_H
//! Client uses TCP with TLS
pcmk__client_tls = (UINT64_C(1) << 34),
# endif
// The rest are client attributes
//! Client IPC is proxied
pcmk__client_proxied = (UINT64_C(1) << 40),
//! Client is run by root or cluster user
pcmk__client_privileged = (UINT64_C(1) << 41),
//! Local client to be proxied
pcmk__client_to_proxy = (UINT64_C(1) << 42),
/*!
* \brief Client IPC connection accepted
*
* Used only for remote CIB connections via \c remote-tls-port.
*/
pcmk__client_authenticated = (UINT64_C(1) << 43),
# ifdef HAVE_GNUTLS_GNUTLS_H
//! Client TLS handshake is complete
pcmk__client_tls_handshake_complete = (UINT64_C(1) << 44),
# endif
};
#define PCMK__CLIENT_TYPE(client) ((client)->flags & UINT64_C(0xff00000000))
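/* Because each client type is a single bit within the masked range, the type
 * can be tested by direct comparison. A sketch (not code from this header;
 * pcmk__client_tls exists only in GnuTLS-enabled builds):
 *
 *     if (PCMK__CLIENT_TYPE(client) == pcmk__client_tls) {
 *         // handle TLS-specific client logic
 *     }
 */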
struct pcmk__client_s {
unsigned int pid;
char *id;
char *name;
char *user;
uint64_t flags; // Group of pcmk__client_flags
int request_id;
void *userdata;
int event_timer;
GQueue *event_queue;
/* Depending on the client type, only some of the following will be
* populated/valid. @TODO Maybe convert to a union.
*/
qb_ipcs_connection_t *ipcs; /* IPC */
struct pcmk__remote_s *remote; /* TCP/TLS */
unsigned int queue_backlog; /* IPC queue length after last flush */
unsigned int queue_max; /* Evict client whose queue grows this big */
};
#define pcmk__set_client_flags(client, flags_to_set) do { \
(client)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Client", pcmk__client_name(client), \
(client)->flags, (flags_to_set), #flags_to_set); \
} while (0)
#define pcmk__clear_client_flags(client, flags_to_clear) do { \
(client)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, \
"Client", pcmk__client_name(client), \
(client)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
#define pcmk__set_ipc_flags(ipc_flags, ipc_name, flags_to_set) do { \
ipc_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
"IPC", (ipc_name), \
(ipc_flags), (flags_to_set), \
#flags_to_set); \
} while (0)
#define pcmk__clear_ipc_flags(ipc_flags, ipc_name, flags_to_clear) do { \
ipc_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
"IPC", (ipc_name), \
(ipc_flags), (flags_to_clear), \
#flags_to_clear); \
} while (0)
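/* Usage sketch for the flag helpers above (the flag choice is illustrative):
 *
 *     pcmk__set_client_flags(client, pcmk__client_privileged);
 *     ...
 *     pcmk__clear_client_flags(client, pcmk__client_privileged);
 *
 * Both log the change at trace level along with the stringified flag names.
 */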
guint pcmk__ipc_client_count(void);
void pcmk__foreach_ipc_client(GHFunc func, gpointer user_data);
void pcmk__client_cleanup(void);
pcmk__client_t *pcmk__find_client(const qb_ipcs_connection_t *c);
pcmk__client_t *pcmk__find_client_by_id(const char *id);
const char *pcmk__client_name(const pcmk__client_t *c);
const char *pcmk__client_type_str(uint64_t client_type);
pcmk__client_t *pcmk__new_unauth_client(void *key);
pcmk__client_t *pcmk__new_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid);
void pcmk__free_client(pcmk__client_t *c);
void pcmk__drop_all_clients(qb_ipcs_service_t *s);
bool pcmk__set_client_queue_max(pcmk__client_t *client, const char *qmax);
xmlNode *pcmk__ipc_create_ack_as(const char *function, int line, uint32_t flags,
const char *tag, const char *ver, crm_exit_t status);
#define pcmk__ipc_create_ack(flags, tag, ver, st) \
pcmk__ipc_create_ack_as(__func__, __LINE__, (flags), (tag), (ver), (st))
int pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c,
uint32_t request, uint32_t flags, const char *tag,
const char *ver, crm_exit_t status);
#define pcmk__ipc_send_ack(c, req, flags, tag, ver, st) \
pcmk__ipc_send_ack_as(__func__, __LINE__, (c), (req), (flags), (tag), (ver), (st))
int pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message,
uint32_t max_send_size,
struct iovec **result, ssize_t *bytes);
int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message,
uint32_t flags);
int pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags);
xmlNode *pcmk__client_data2xml(pcmk__client_t *c, void *data,
uint32_t *id, uint32_t *flags);
int pcmk__client_pid(qb_ipcs_connection_t *c);
void pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs,
struct qb_ipcs_service_handlers *cb);
void pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs,
struct qb_ipcs_service_handlers *cb);
void pcmk__serve_pacemakerd_ipc(qb_ipcs_service_t **ipcs,
struct qb_ipcs_service_handlers *cb);
qb_ipcs_service_t *pcmk__serve_schedulerd_ipc(struct qb_ipcs_service_handlers *cb);
qb_ipcs_service_t *pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb);
void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro,
qb_ipcs_service_t **ipcs_rw,
qb_ipcs_service_t **ipcs_shm,
struct qb_ipcs_service_handlers *ro_cb,
struct qb_ipcs_service_handlers *rw_cb);
void pcmk__stop_based_ipc(qb_ipcs_service_t *ipcs_ro,
qb_ipcs_service_t *ipcs_rw,
qb_ipcs_service_t *ipcs_shm);
static inline const char *
pcmk__ipc_sys_name(const char *ipc_name, const char *fallback)
{
return ipc_name ? ipc_name : ((crm_system_name ? crm_system_name : fallback));
}
const char *pcmk__pcmkd_state_enum2friendly(enum pcmk_pacemakerd_state state);
const char *pcmk__controld_api_reply2str(enum pcmk_controld_api_reply reply);
const char *pcmk__pcmkd_api_reply2str(enum pcmk_pacemakerd_api_reply reply);
#ifdef __cplusplus
}
#endif
#endif
diff --git a/include/crm/lrmd_internal.h b/include/crm/lrmd_internal.h
index 5810554f82..d1cd25dfff 100644
--- a/include/crm/lrmd_internal.h
+++ b/include/crm/lrmd_internal.h
@@ -1,91 +1,92 @@
/*
* Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef LRMD_INTERNAL__H
#define LRMD_INTERNAL__H
#include <stdint.h> // uint32_t
#include <glib.h> // GList, GHashTable, gpointer
#include <libxml/tree.h> // xmlNode
#include <crm/common/ipc.h> // crm_ipc_t
#include <crm/common/mainloop.h> // mainloop_io_t, ipc_client_callbacks
#include <crm/common/output_internal.h> // pcmk__output_t
#include <crm/common/remote_internal.h> // pcmk__remote_t
#include <crm/lrmd.h> // lrmd_t, lrmd_event_data_t, lrmd_rsc_info_t
int lrmd__new(lrmd_t **api, const char *nodename, const char *server, int port);
int lrmd_send_attribute_alert(lrmd_t *lrmd, const GList *alert_list,
const char *node, uint32_t nodeid,
const char *attr_name, const char *attr_value);
int lrmd_send_node_alert(lrmd_t *lrmd, const GList *alert_list,
const char *node, uint32_t nodeid, const char *state);
int lrmd_send_fencing_alert(lrmd_t *lrmd, const GList *alert_list,
const char *target, const char *task,
const char *desc, int op_rc);
int lrmd_send_resource_alert(lrmd_t *lrmd, const GList *alert_list,
const char *node, const lrmd_event_data_t *op);
int lrmd__remote_send_xml(pcmk__remote_t *session, xmlNode *msg, uint32_t id,
const char *msg_type);
int lrmd__metadata_async(const lrmd_rsc_info_t *rsc,
void (*callback)(int pid,
const pcmk__action_result_t *result,
void *user_data),
void *user_data);
void lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc,
int op_status, const char *exit_reason);
void lrmd__reset_result(lrmd_event_data_t *event);
time_t lrmd__uptime(lrmd_t *lrmd);
+const char *lrmd__node_start_state(lrmd_t *lrmd);
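/* Sketch (assumes a connected remote executor session; the NULL behavior is
 * an assumption based on the handshake changes in this changeset):
 *
 *     const char *state = lrmd__node_start_state(lrmd);
 *     // state is the start state reported by the remote during handshake,
 *     // or NULL if the server did not include one
 */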
/* Shared functions for IPC proxy back end */
typedef struct remote_proxy_s {
char *node_name;
char *session_id;
gboolean is_local;
crm_ipc_t *ipc;
mainloop_io_t *source;
uint32_t last_request_id;
lrmd_t *lrm;
} remote_proxy_t;
remote_proxy_t *remote_proxy_new(lrmd_t *lrmd,
struct ipc_client_callbacks *proxy_callbacks,
const char *node_name, const char *session_id,
const char *channel);
int lrmd__validate_remote_settings(lrmd_t *lrmd, GHashTable *hash);
void remote_proxy_cb(lrmd_t *lrmd, const char *node_name, xmlNode *msg);
void remote_proxy_ack_shutdown(lrmd_t *lrmd);
void remote_proxy_nack_shutdown(lrmd_t *lrmd);
int remote_proxy_dispatch(const char *buffer, ssize_t length,
gpointer userdata);
void remote_proxy_disconnected(gpointer data);
void remote_proxy_free(gpointer data);
void remote_proxy_relay_event(remote_proxy_t *proxy, xmlNode *msg);
void remote_proxy_relay_response(remote_proxy_t *proxy, xmlNode *msg,
int msg_id);
void lrmd__register_messages(pcmk__output_t *out);
#ifdef HAVE_GNUTLS_GNUTLS_H
int lrmd__init_remote_key(gnutls_datum_t *key);
#endif
#endif
diff --git a/include/crm_internal.h b/include/crm_internal.h
index 5f6531f6ec..771bd260d3 100644
--- a/include/crm_internal.h
+++ b/include/crm_internal.h
@@ -1,118 +1,119 @@
/*
* Copyright 2006-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM_INTERNAL__H
# define CRM_INTERNAL__H
# ifndef PCMK__CONFIG_H
# define PCMK__CONFIG_H
# include <config.h>
# endif
# include <portability.h>
/* Our minimum glib dependency is 2.42. Define that as both the minimum and
* maximum glib APIs that are allowed (i.e. APIs that were already deprecated
* in 2.42, and APIs introduced after 2.42, cannot be used by Pacemaker code).
*/
#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_42
#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_42
# include <glib.h>
# include <stdbool.h>
# include <libxml/tree.h>
/* Public API headers can guard including deprecated API headers with this
* symbol, thus preventing internal code (which includes this header) from using
* deprecated APIs, while still allowing external code to use them by default.
*/
#define PCMK_ALLOW_DEPRECATED 0
# include <crm/lrmd.h>
# include <crm/common/logging.h>
# include <crm/common/logging_internal.h>
# include <crm/common/ipc_internal.h>
# include <crm/common/options_internal.h>
# include <crm/common/output_internal.h>
# include <crm/common/xml_internal.h>
# include <crm/common/internal.h>
# include <locale.h>
# include <gettext.h>
#define N_(String) (String)
#ifdef ENABLE_NLS
# define _(String) gettext(String)
#else
# define _(String) (String)
#endif
/*
* XML attribute names used only by internal code
*/
#define PCMK__XA_ATTR_DAMPENING "attr_dampening"
#define PCMK__XA_ATTR_FORCE "attrd_is_force_write"
#define PCMK__XA_ATTR_INTERVAL "attr_clear_interval"
#define PCMK__XA_ATTR_IS_PRIVATE "attr_is_private"
#define PCMK__XA_ATTR_IS_REMOTE "attr_is_remote"
#define PCMK__XA_ATTR_NAME "attr_name"
#define PCMK__XA_ATTR_NODE_ID "attr_host_id"
#define PCMK__XA_ATTR_NODE_NAME "attr_host"
#define PCMK__XA_ATTR_OPERATION "attr_clear_operation"
#define PCMK__XA_ATTR_PATTERN "attr_regex"
#define PCMK__XA_ATTR_RESOURCE "attr_resource"
#define PCMK__XA_ATTR_SECTION "attr_section"
#define PCMK__XA_ATTR_SET "attr_set"
#define PCMK__XA_ATTR_SET_TYPE "attr_set_type"
#define PCMK__XA_ATTR_SYNC_POINT "attr_sync_point"
#define PCMK__XA_ATTR_USER "attr_user"
#define PCMK__XA_ATTR_UUID "attr_key"
#define PCMK__XA_ATTR_VALUE "attr_value"
#define PCMK__XA_ATTR_VERSION "attr_version"
#define PCMK__XA_ATTR_WRITER "attr_writer"
#define PCMK__XA_CONFIG_ERRORS "config-errors"
#define PCMK__XA_CONFIG_WARNINGS "config-warnings"
#define PCMK__XA_CONFIRM "confirm"
#define PCMK__XA_GRAPH_ERRORS "graph-errors"
#define PCMK__XA_GRAPH_WARNINGS "graph-warnings"
#define PCMK__XA_MODE "mode"
+#define PCMK__XA_NODE_START_STATE "node_start_state"
#define PCMK__XA_TASK "task"
#define PCMK__XA_UPTIME "uptime"
#define PCMK__XA_CONN_HOST "connection_host"
/*
* IPC service names that are only used internally
*/
# define PCMK__SERVER_BASED_RO "cib_ro"
# define PCMK__SERVER_BASED_RW "cib_rw"
# define PCMK__SERVER_BASED_SHM "cib_shm"
/*
* IPC commands that can be sent to Pacemaker daemons
*/
#define PCMK__ATTRD_CMD_PEER_REMOVE "peer-remove"
#define PCMK__ATTRD_CMD_UPDATE "update"
#define PCMK__ATTRD_CMD_UPDATE_BOTH "update-both"
#define PCMK__ATTRD_CMD_UPDATE_DELAY "update-delay"
#define PCMK__ATTRD_CMD_QUERY "query"
#define PCMK__ATTRD_CMD_REFRESH "refresh"
#define PCMK__ATTRD_CMD_FLUSH "flush"
#define PCMK__ATTRD_CMD_SYNC "sync"
#define PCMK__ATTRD_CMD_SYNC_RESPONSE "sync-response"
#define PCMK__ATTRD_CMD_CLEAR_FAILURE "clear-failure"
#define PCMK__ATTRD_CMD_CONFIRM "confirm"
#define PCMK__CONTROLD_CMD_NODES "list-nodes"
#endif /* CRM_INTERNAL__H */
diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c
index c565728679..82434b94a5 100644
--- a/lib/lrmd/lrmd_client.c
+++ b/lib/lrmd/lrmd_client.c
@@ -1,2533 +1,2552 @@
/*
* Copyright 2012-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h> // uint32_t, uint64_t
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <glib.h>
#include <dirent.h>
#include <crm/crm.h>
#include <crm/lrmd.h>
#include <crm/lrmd_internal.h>
#include <crm/services.h>
#include <crm/services_internal.h>
#include <crm/common/mainloop.h>
#include <crm/common/ipc_internal.h>
#include <crm/common/remote_internal.h>
#include <crm/msg_xml.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#ifdef HAVE_GNUTLS_GNUTLS_H
# include <gnutls/gnutls.h>
#endif
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <netdb.h>
#define MAX_TLS_RECV_WAIT 10000
CRM_TRACE_INIT_DATA(lrmd);
static int lrmd_api_disconnect(lrmd_t * lrmd);
static int lrmd_api_is_connected(lrmd_t * lrmd);
/* IPC proxy functions */
int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg);
static void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg);
void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg));
#ifdef HAVE_GNUTLS_GNUTLS_H
# define LRMD_CLIENT_HANDSHAKE_TIMEOUT 5000 /* 5 seconds */
gnutls_psk_client_credentials_t psk_cred_s;
static void lrmd_tls_disconnect(lrmd_t * lrmd);
static int global_remote_msg_id = 0;
static void lrmd_tls_connection_destroy(gpointer userdata);
#endif
typedef struct lrmd_private_s {
uint64_t type;
char *token;
mainloop_io_t *source;
/* IPC parameters */
crm_ipc_t *ipc;
pcmk__remote_t *remote;
/* Extra TLS parameters */
char *remote_nodename;
#ifdef HAVE_GNUTLS_GNUTLS_H
char *server;
int port;
gnutls_psk_client_credentials_t psk_cred_c;
/* while the async connection is occurring, this is the id
* of the connection timeout timer. */
int async_timer;
int sock;
/* Since TLS requires a round trip across the network for a
* request/reply, there are times when we just want to send a request
* from the client and not wait around for (or even care about)
* the reply. */
int expected_late_replies;
GList *pending_notify;
crm_trigger_t *process_notify;
#endif
lrmd_event_callback callback;
/* Internal IPC proxy msg passing for remote guests */
void (*proxy_callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg);
void *proxy_callback_userdata;
char *peer_version;
} lrmd_private_t;
static lrmd_list_t *
lrmd_list_add(lrmd_list_t * head, const char *value)
{
lrmd_list_t *p, *end;
p = calloc(1, sizeof(lrmd_list_t));
p->val = strdup(value);
end = head;
while (end && end->next) {
end = end->next;
}
if (end) {
end->next = p;
} else {
head = p;
}
return head;
}
void
lrmd_list_freeall(lrmd_list_t * head)
{
lrmd_list_t *p;
while (head) {
char *val = (char *)head->val;
p = head->next;
free(val);
free(head);
head = p;
}
}
lrmd_key_value_t *
lrmd_key_value_add(lrmd_key_value_t * head, const char *key, const char *value)
{
lrmd_key_value_t *p, *end;
p = calloc(1, sizeof(lrmd_key_value_t));
p->key = strdup(key);
p->value = strdup(value);
end = head;
while (end && end->next) {
end = end->next;
}
if (end) {
end->next = p;
} else {
head = p;
}
return head;
}
void
lrmd_key_value_freeall(lrmd_key_value_t * head)
{
lrmd_key_value_t *p;
while (head) {
p = head->next;
free(head->key);
free(head->value);
free(head);
head = p;
}
}
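/* Usage sketch for building and releasing a parameter list (the key and
 * value strings are illustrative):
 *
 *     lrmd_key_value_t *params = NULL;
 *
 *     params = lrmd_key_value_add(params, "ip", "192.0.2.10");
 *     params = lrmd_key_value_add(params, "cidr_netmask", "24");
 *     ...
 *     lrmd_key_value_freeall(params);  // only if not handed off to an API
 *                                      // call that takes ownership
 */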
/*!
* \brief Create a new lrmd_event_data_t object
*
* \param[in] rsc_id ID of resource involved in event
* \param[in] task Action name
* \param[in] interval_ms Action interval
*
* \return Newly allocated and initialized lrmd_event_data_t
* \note This function asserts on memory errors, so the return value is
* guaranteed to be non-NULL. The caller is responsible for freeing the
* result with lrmd_free_event().
*/
lrmd_event_data_t *
lrmd_new_event(const char *rsc_id, const char *task, guint interval_ms)
{
lrmd_event_data_t *event = calloc(1, sizeof(lrmd_event_data_t));
CRM_ASSERT(event != NULL);
pcmk__str_update((char **) &event->rsc_id, rsc_id);
pcmk__str_update((char **) &event->op_type, task);
event->interval_ms = interval_ms;
return event;
}
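/* Sketch of the documented contract (resource ID, action name, and interval
 * are illustrative):
 *
 *     lrmd_event_data_t *event = lrmd_new_event("my-rsc", "monitor", 10000);
 *
 *     // ... populate and use the event ...
 *     lrmd_free_event(event);  // caller owns the result
 */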
lrmd_event_data_t *
lrmd_copy_event(lrmd_event_data_t * event)
{
lrmd_event_data_t *copy = NULL;
copy = calloc(1, sizeof(lrmd_event_data_t));
copy->type = event->type;
pcmk__str_update((char **) &copy->rsc_id, event->rsc_id);
pcmk__str_update((char **) &copy->op_type, event->op_type);
pcmk__str_update((char **) &copy->user_data, event->user_data);
copy->call_id = event->call_id;
copy->timeout = event->timeout;
copy->interval_ms = event->interval_ms;
copy->start_delay = event->start_delay;
copy->rsc_deleted = event->rsc_deleted;
copy->rc = event->rc;
copy->op_status = event->op_status;
pcmk__str_update((char **) &copy->output, event->output);
copy->t_run = event->t_run;
copy->t_rcchange = event->t_rcchange;
copy->exec_time = event->exec_time;
copy->queue_time = event->queue_time;
copy->connection_rc = event->connection_rc;
copy->params = pcmk__str_table_dup(event->params);
pcmk__str_update((char **) &copy->remote_nodename, event->remote_nodename);
pcmk__str_update((char **) &copy->exit_reason, event->exit_reason);
return copy;
}
/*!
* \brief Free an executor event
*
* \param[in,out] event  Executor event object to free
*/
void
lrmd_free_event(lrmd_event_data_t *event)
{
if (event == NULL) {
return;
}
// @TODO Why are these const char *?
free((void *) event->rsc_id);
free((void *) event->op_type);
free((void *) event->user_data);
free((void *) event->remote_nodename);
lrmd__reset_result(event);
if (event->params != NULL) {
g_hash_table_destroy(event->params);
}
free(event);
}
static void
lrmd_dispatch_internal(lrmd_t * lrmd, xmlNode * msg)
{
const char *type;
const char *proxy_session = crm_element_value(msg, F_LRMD_IPC_SESSION);
lrmd_private_t *native = lrmd->lrmd_private;
lrmd_event_data_t event = { 0, };
if (proxy_session != NULL) {
/* this is proxy business */
lrmd_internal_proxy_dispatch(lrmd, msg);
return;
} else if (!native->callback) {
/* no callback set */
crm_trace("notify event received but client has not set callback");
return;
}
event.remote_nodename = native->remote_nodename;
type = crm_element_value(msg, F_LRMD_OPERATION);
crm_element_value_int(msg, F_LRMD_CALLID, &event.call_id);
event.rsc_id = crm_element_value(msg, F_LRMD_RSC_ID);
if (pcmk__str_eq(type, LRMD_OP_RSC_REG, pcmk__str_none)) {
event.type = lrmd_event_register;
} else if (pcmk__str_eq(type, LRMD_OP_RSC_UNREG, pcmk__str_none)) {
event.type = lrmd_event_unregister;
} else if (pcmk__str_eq(type, LRMD_OP_RSC_EXEC, pcmk__str_none)) {
time_t epoch = 0;
crm_element_value_int(msg, F_LRMD_TIMEOUT, &event.timeout);
crm_element_value_ms(msg, F_LRMD_RSC_INTERVAL, &event.interval_ms);
crm_element_value_int(msg, F_LRMD_RSC_START_DELAY, &event.start_delay);
crm_element_value_int(msg, F_LRMD_EXEC_RC, (int *)&event.rc);
crm_element_value_int(msg, F_LRMD_OP_STATUS, &event.op_status);
crm_element_value_int(msg, F_LRMD_RSC_DELETED, &event.rsc_deleted);
crm_element_value_epoch(msg, F_LRMD_RSC_RUN_TIME, &epoch);
event.t_run = (unsigned int) epoch;
crm_element_value_epoch(msg, F_LRMD_RSC_RCCHANGE_TIME, &epoch);
event.t_rcchange = (unsigned int) epoch;
crm_element_value_int(msg, F_LRMD_RSC_EXEC_TIME, (int *)&event.exec_time);
crm_element_value_int(msg, F_LRMD_RSC_QUEUE_TIME, (int *)&event.queue_time);
event.op_type = crm_element_value(msg, F_LRMD_RSC_ACTION);
event.user_data = crm_element_value(msg, F_LRMD_RSC_USERDATA_STR);
event.type = lrmd_event_exec_complete;
/* output and exit_reason may be freed by a callback */
event.output = crm_element_value_copy(msg, F_LRMD_RSC_OUTPUT);
lrmd__set_result(&event, event.rc, event.op_status,
crm_element_value(msg, F_LRMD_RSC_EXIT_REASON));
event.params = xml2list(msg);
} else if (pcmk__str_eq(type, LRMD_OP_NEW_CLIENT, pcmk__str_none)) {
event.type = lrmd_event_new_client;
} else if (pcmk__str_eq(type, LRMD_OP_POKE, pcmk__str_none)) {
event.type = lrmd_event_poke;
} else {
return;
}
crm_trace("op %s notify event received", type);
native->callback(&event);
if (event.params) {
g_hash_table_destroy(event.params);
}
lrmd__reset_result(&event);
}
// \return Always 0, to indicate that IPC mainloop source should be kept
static int
lrmd_ipc_dispatch(const char *buffer, ssize_t length, gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
if (native->callback != NULL) {
xmlNode *msg = string2xml(buffer);
lrmd_dispatch_internal(lrmd, msg);
free_xml(msg);
}
return 0;
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
lrmd_free_xml(gpointer userdata)
{
free_xml((xmlNode *) userdata);
}
static bool
remote_executor_connected(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
return (native->remote->tls_session != NULL);
}
/*!
* \internal
* \brief TLS dispatch function (for both trigger and file descriptor sources)
*
* \param[in,out] userdata API connection
*
* \return Always return a nonnegative value, which as a file descriptor
* dispatch function means keep the mainloop source, and as a
* trigger dispatch function, 0 means remove the trigger from the
* mainloop while 1 means keep it (and job completed)
*/
static int
lrmd_tls_dispatch(gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *xml = NULL;
int rc = pcmk_rc_ok;
if (!remote_executor_connected(lrmd)) {
crm_trace("TLS dispatch triggered after disconnect");
return 0;
}
crm_trace("TLS dispatch triggered");
/* First check if there are any pending notifies to process that came
* while we were waiting for replies earlier. */
if (native->pending_notify) {
GList *iter = NULL;
crm_trace("Processing pending notifies");
for (iter = native->pending_notify; iter; iter = iter->next) {
lrmd_dispatch_internal(lrmd, iter->data);
}
g_list_free_full(native->pending_notify, lrmd_free_xml);
native->pending_notify = NULL;
}
/* Next read the current buffer and see if there are any messages to handle. */
switch (pcmk__remote_ready(native->remote, 0)) {
case pcmk_rc_ok:
rc = pcmk__read_remote_message(native->remote, -1);
xml = pcmk__remote_message_xml(native->remote);
break;
case ETIME:
// Nothing to read, check if a full message is already in buffer
xml = pcmk__remote_message_xml(native->remote);
break;
default:
rc = ENOTCONN;
break;
}
while (xml) {
const char *msg_type = crm_element_value(xml, F_LRMD_REMOTE_MSG_TYPE);
if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) {
lrmd_dispatch_internal(lrmd, xml);
} else if (pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) {
if (native->expected_late_replies > 0) {
native->expected_late_replies--;
} else {
int reply_id = 0;
crm_element_value_int(xml, F_LRMD_CALLID, &reply_id);
/* if this happens, we want to know about it */
crm_err("Got outdated Pacemaker Remote reply %d", reply_id);
}
}
free_xml(xml);
xml = pcmk__remote_message_xml(native->remote);
}
if (rc == ENOTCONN) {
crm_info("Lost %s executor connection while reading data",
(native->remote_nodename? native->remote_nodename : "local"));
lrmd_tls_disconnect(lrmd);
return 0;
}
return 1;
}
#endif
/* Not used with mainloop */
int
lrmd_poll(lrmd_t * lrmd, int timeout)
{
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case pcmk__client_ipc:
return crm_ipc_ready(native->ipc);
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
if (native->pending_notify) {
return 1;
} else {
int rc = pcmk__remote_ready(native->remote, 0);
switch (rc) {
case pcmk_rc_ok:
return 1;
case ETIME:
return 0;
default:
return pcmk_rc2legacy(rc);
}
}
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
native->type);
return -EPROTONOSUPPORT;
}
}
/* Not used with mainloop */
bool
lrmd_dispatch(lrmd_t * lrmd)
{
lrmd_private_t *private = NULL;
CRM_ASSERT(lrmd != NULL);
private = lrmd->lrmd_private;
switch (private->type) {
case pcmk__client_ipc:
while (crm_ipc_ready(private->ipc)) {
if (crm_ipc_read(private->ipc) > 0) {
const char *msg = crm_ipc_buffer(private->ipc);
lrmd_ipc_dispatch(msg, strlen(msg), lrmd);
}
}
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
lrmd_tls_dispatch(lrmd);
break;
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
private->type);
}
if (lrmd_api_is_connected(lrmd) == FALSE) {
crm_err("Connection closed");
return FALSE;
}
return TRUE;
}
static xmlNode *
lrmd_create_op(const char *token, const char *op, xmlNode *data, int timeout,
enum lrmd_call_options options)
{
xmlNode *op_msg = create_xml_node(NULL, "lrmd_command");
CRM_CHECK(op_msg != NULL, return NULL);
CRM_CHECK(token != NULL, return NULL);
crm_xml_add(op_msg, F_XML_TAGNAME, "lrmd_command");
crm_xml_add(op_msg, F_TYPE, T_LRMD);
crm_xml_add(op_msg, F_LRMD_CALLBACK_TOKEN, token);
crm_xml_add(op_msg, F_LRMD_OPERATION, op);
crm_xml_add_int(op_msg, F_LRMD_TIMEOUT, timeout);
crm_xml_add_int(op_msg, F_LRMD_CALLOPTS, options);
if (data != NULL) {
add_message_xml(op_msg, F_LRMD_CALLDATA, data);
}
crm_trace("Created executor %s command with call options %.8lx (%d)",
op, (long)options, options);
return op_msg;
}
static void
lrmd_ipc_connection_destroy(gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
crm_info("IPC connection destroyed");
/* Prevent these from being cleaned up in lrmd_api_disconnect() */
native->ipc = NULL;
native->source = NULL;
if (native->callback) {
lrmd_event_data_t event = { 0, };
event.type = lrmd_event_disconnect;
event.remote_nodename = native->remote_nodename;
native->callback(&event);
}
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
lrmd_tls_connection_destroy(gpointer userdata)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
crm_info("TLS connection destroyed");
if (native->remote->tls_session) {
gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
}
if (native->psk_cred_c) {
gnutls_psk_free_client_credentials(native->psk_cred_c);
}
if (native->sock) {
close(native->sock);
}
if (native->process_notify) {
mainloop_destroy_trigger(native->process_notify);
native->process_notify = NULL;
}
if (native->pending_notify) {
g_list_free_full(native->pending_notify, lrmd_free_xml);
native->pending_notify = NULL;
}
free(native->remote->buffer);
+ free(native->remote->start_state);
native->remote->buffer = NULL;
+ native->remote->start_state = NULL;
native->source = 0;
native->sock = 0;
native->psk_cred_c = NULL;
native->remote->tls_session = NULL;
native->sock = 0;
if (native->callback) {
lrmd_event_data_t event = { 0, };
event.remote_nodename = native->remote_nodename;
event.type = lrmd_event_disconnect;
native->callback(&event);
}
return;
}
// \return Standard Pacemaker return code
int
lrmd__remote_send_xml(pcmk__remote_t *session, xmlNode *msg, uint32_t id,
const char *msg_type)
{
crm_xml_add_int(msg, F_LRMD_REMOTE_MSG_ID, id);
crm_xml_add(msg, F_LRMD_REMOTE_MSG_TYPE, msg_type);
return pcmk__remote_send_xml(session, msg);
}
// \return Standard Pacemaker return code
static int
read_remote_reply(lrmd_t *lrmd, int total_timeout, int expected_reply_id,
xmlNode **reply)
{
lrmd_private_t *native = lrmd->lrmd_private;
time_t start = time(NULL);
const char *msg_type = NULL;
int reply_id = 0;
int remaining_timeout = 0;
int rc = pcmk_rc_ok;
/* A timeout of 0 here makes no sense: we have to wait some period of time
* for the response to come back. If the timeout is nonpositive or larger
* than MAX_TLS_RECV_WAIT, default to 10 seconds (MAX_TLS_RECV_WAIT). */
if (total_timeout <= 0 || total_timeout > MAX_TLS_RECV_WAIT) {
total_timeout = MAX_TLS_RECV_WAIT;
}
for (*reply = NULL; *reply == NULL; ) {
*reply = pcmk__remote_message_xml(native->remote);
if (*reply == NULL) {
/* read some more off the tls buffer if we still have time left. */
if (remaining_timeout) {
remaining_timeout = total_timeout - ((time(NULL) - start) * 1000);
} else {
remaining_timeout = total_timeout;
}
if (remaining_timeout <= 0) {
return ETIME;
}
rc = pcmk__read_remote_message(native->remote, remaining_timeout);
if (rc != pcmk_rc_ok) {
return rc;
}
*reply = pcmk__remote_message_xml(native->remote);
if (*reply == NULL) {
return ENOMSG;
}
}
crm_element_value_int(*reply, F_LRMD_REMOTE_MSG_ID, &reply_id);
msg_type = crm_element_value(*reply, F_LRMD_REMOTE_MSG_TYPE);
if (!msg_type) {
crm_err("Empty msg type received while waiting for reply");
free_xml(*reply);
*reply = NULL;
} else if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) {
/* got a notify while waiting for reply, trigger the notify to be processed later */
crm_info("queueing notify");
native->pending_notify = g_list_append(native->pending_notify, *reply);
if (native->process_notify) {
crm_info("notify trigger set.");
mainloop_set_trigger(native->process_notify);
}
*reply = NULL;
} else if (!pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) {
/* msg isn't a reply, make some noise */
crm_err("Expected a reply, got %s", msg_type);
free_xml(*reply);
*reply = NULL;
} else if (reply_id != expected_reply_id) {
if (native->expected_late_replies > 0) {
native->expected_late_replies--;
} else {
crm_err("Got outdated reply, expected id %d got id %d", expected_reply_id, reply_id);
}
free_xml(*reply);
*reply = NULL;
}
}
if (native->remote->buffer && native->process_notify) {
mainloop_set_trigger(native->process_notify);
}
return rc;
}
// \return Standard Pacemaker return code
static int
send_remote_message(lrmd_t *lrmd, xmlNode *msg)
{
int rc = pcmk_rc_ok;
lrmd_private_t *native = lrmd->lrmd_private;
global_remote_msg_id++;
if (global_remote_msg_id <= 0) {
global_remote_msg_id = 1;
}
rc = lrmd__remote_send_xml(native->remote, msg, global_remote_msg_id,
"request");
if (rc != pcmk_rc_ok) {
crm_err("Disconnecting because TLS message could not be sent to "
"Pacemaker Remote: %s", pcmk_rc_str(rc));
lrmd_tls_disconnect(lrmd);
}
return rc;
}
static int
lrmd_tls_send_recv(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply)
{
int rc = 0;
xmlNode *xml = NULL;
if (!remote_executor_connected(lrmd)) {
return -ENOTCONN;
}
rc = send_remote_message(lrmd, msg);
if (rc != pcmk_rc_ok) {
return pcmk_rc2legacy(rc);
}
rc = read_remote_reply(lrmd, timeout, global_remote_msg_id, &xml);
if (rc != pcmk_rc_ok) {
crm_err("Disconnecting remote after request %d reply not received: %s "
CRM_XS " rc=%d timeout=%dms",
global_remote_msg_id, pcmk_rc_str(rc), rc, timeout);
lrmd_tls_disconnect(lrmd);
}
if (reply) {
*reply = xml;
} else {
free_xml(xml);
}
return pcmk_rc2legacy(rc);
}
#endif
static int
lrmd_send_xml(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case pcmk__client_ipc:
rc = crm_ipc_send(native->ipc, msg, crm_ipc_client_response, timeout, reply);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
rc = lrmd_tls_send_recv(lrmd, msg, timeout, reply);
break;
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
native->type);
rc = -EPROTONOSUPPORT;
}
return rc;
}
static int
lrmd_send_xml_no_reply(lrmd_t * lrmd, xmlNode * msg)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case pcmk__client_ipc:
rc = crm_ipc_send(native->ipc, msg, crm_ipc_flags_none, 0, NULL);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
rc = send_remote_message(lrmd, msg);
if (rc == pcmk_rc_ok) {
/* we don't want to wait around for the reply, but
* since the request/reply protocol needs to behave the same
* as libqb, a reply will eventually come later anyway. */
native->expected_late_replies++;
}
rc = pcmk_rc2legacy(rc);
break;
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
native->type);
rc = -EPROTONOSUPPORT;
}
return rc;
}
static int
lrmd_api_is_connected(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case pcmk__client_ipc:
return crm_ipc_connected(native->ipc);
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
return remote_executor_connected(lrmd);
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
native->type);
return 0;
}
}
/*!
* \internal
* \brief Send a prepared API command to the executor
*
* \param[in,out] lrmd Existing connection to the executor
* \param[in] op Name of API command to send
* \param[in] data Command data XML to add to the sent command
* \param[out] output_data If expecting a reply, it will be stored here
* \param[in] timeout Timeout in milliseconds (if 0, defaults to
* a sensible value per the type of connection,
* standard vs. pacemaker remote);
* also propagated to the command XML
* \param[in] call_options Call options to pass to server when sending
* \param[in] expect_reply If TRUE, wait for a reply from the server;
* must be TRUE for IPC (as opposed to TLS) clients
*
* \return pcmk_ok on success, -errno on error
*/
static int
lrmd_send_command(lrmd_t *lrmd, const char *op, xmlNode *data,
xmlNode **output_data, int timeout,
enum lrmd_call_options options, gboolean expect_reply)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *op_msg = NULL;
xmlNode *op_reply = NULL;
if (!lrmd_api_is_connected(lrmd)) {
return -ENOTCONN;
}
if (op == NULL) {
crm_err("No operation specified");
return -EINVAL;
}
CRM_CHECK(native->token != NULL,;
);
crm_trace("Sending %s op to executor", op);
op_msg = lrmd_create_op(native->token, op, data, timeout, options);
if (op_msg == NULL) {
return -EINVAL;
}
if (expect_reply) {
rc = lrmd_send_xml(lrmd, op_msg, timeout, &op_reply);
} else {
rc = lrmd_send_xml_no_reply(lrmd, op_msg);
goto done;
}
if (rc < 0) {
crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%d): %d", op, timeout, rc);
goto done;
} else if(op_reply == NULL) {
rc = -ENOMSG;
goto done;
}
rc = pcmk_ok;
crm_trace("%s op reply received", op);
if (crm_element_value_int(op_reply, F_LRMD_RC, &rc) != 0) {
rc = -ENOMSG;
goto done;
}
crm_log_xml_trace(op_reply, "Reply");
if (output_data) {
*output_data = op_reply;
op_reply = NULL; /* Prevent subsequent free */
}
done:
if (lrmd_api_is_connected(lrmd) == FALSE) {
crm_err("Executor disconnected");
}
free_xml(op_msg);
free_xml(op_reply);
return rc;
}
static int
lrmd_api_poke_connection(lrmd_t * lrmd)
{
int rc;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
rc = lrmd_send_command(lrmd, LRMD_OP_POKE, data, NULL, 0, 0,
(native->type == pcmk__client_ipc));
free_xml(data);
return rc < 0 ? rc : pcmk_ok;
}
// \return Standard Pacemaker return code
int
lrmd__validate_remote_settings(lrmd_t *lrmd, GHashTable *hash)
{
int rc = pcmk_rc_ok;
const char *value;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *data = create_xml_node(NULL, F_LRMD_OPERATION);
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
value = g_hash_table_lookup(hash, "stonith-watchdog-timeout");
if ((value) &&
(stonith__watchdog_fencing_enabled_for_node(native->remote_nodename))) {
crm_xml_add(data, F_LRMD_WATCHDOG, value);
}
rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0,
(native->type == pcmk__client_ipc));
free_xml(data);
return (rc < 0)? pcmk_legacy2rc(rc) : pcmk_rc_ok;
}
static int
lrmd_handshake(lrmd_t * lrmd, const char *name)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
xmlNode *reply = NULL;
xmlNode *hello = create_xml_node(NULL, "lrmd_command");
crm_xml_add(hello, F_TYPE, T_LRMD);
crm_xml_add(hello, F_LRMD_OPERATION, CRM_OP_REGISTER);
crm_xml_add(hello, F_LRMD_CLIENTNAME, name);
crm_xml_add(hello, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION);
/* advertise that we are a proxy provider */
if (native->proxy_callback) {
pcmk__xe_set_bool_attr(hello, F_LRMD_IS_IPC_PROVIDER, true);
}
rc = lrmd_send_xml(lrmd, hello, -1, &reply);
if (rc < 0) {
crm_perror(LOG_DEBUG, "Couldn't complete registration with the executor API: %d", rc);
rc = -ECOMM;
} else if (reply == NULL) {
crm_err("Did not receive registration reply");
rc = -EPROTO;
} else {
const char *version = crm_element_value(reply, F_LRMD_PROTOCOL_VERSION);
const char *msg_type = crm_element_value(reply, F_LRMD_OPERATION);
const char *tmp_ticket = crm_element_value(reply, F_LRMD_CLIENTID);
+ const char *start_state = crm_element_value(reply, PCMK__XA_NODE_START_STATE);
long long uptime = -1;
crm_element_value_int(reply, F_LRMD_RC, &rc);
/* The remote executor may add its uptime to the XML reply, which is
* useful in handling transient attributes when the connection to the
* remote node unexpectedly drops. If the reply does not include it, just
* default to -1.
*/
crm_element_value_ll(reply, PCMK__XA_UPTIME, &uptime);
native->remote->uptime = uptime;
+ if (start_state) {
+ native->remote->start_state = strdup(start_state);
+ }
+
if (rc == -EPROTO) {
crm_err("Executor protocol version mismatch between client (%s) and server (%s)",
LRMD_PROTOCOL_VERSION, version);
crm_log_xml_err(reply, "Protocol Error");
} else if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) {
crm_err("Invalid registration message: %s", msg_type);
crm_log_xml_err(reply, "Bad reply");
rc = -EPROTO;
} else if (tmp_ticket == NULL) {
crm_err("No registration token provided");
crm_log_xml_err(reply, "Bad reply");
rc = -EPROTO;
} else {
crm_trace("Obtained registration token: %s", tmp_ticket);
native->token = strdup(tmp_ticket);
native->peer_version = strdup(version?version:"1.0"); /* Included since 1.1 */
rc = pcmk_ok;
}
}
free_xml(reply);
free_xml(hello);
if (rc != pcmk_ok) {
lrmd_api_disconnect(lrmd);
}
return rc;
}
static int
lrmd_ipc_connect(lrmd_t * lrmd, int *fd)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
struct ipc_client_callbacks lrmd_callbacks = {
.dispatch = lrmd_ipc_dispatch,
.destroy = lrmd_ipc_connection_destroy
};
crm_info("Connecting to executor");
if (fd) {
/* No mainloop */
native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0);
if (native->ipc && crm_ipc_connect(native->ipc)) {
*fd = crm_ipc_get_fd(native->ipc);
} else if (native->ipc) {
crm_perror(LOG_ERR, "Connection to executor failed");
rc = -ENOTCONN;
}
} else {
native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks);
native->ipc = mainloop_get_ipc_client(native->source);
}
if (native->ipc == NULL) {
crm_debug("Could not connect to the executor API");
rc = -ENOTCONN;
}
return rc;
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
copy_gnutls_datum(gnutls_datum_t *dest, gnutls_datum_t *source)
{
CRM_ASSERT((dest != NULL) && (source != NULL) && (source->data != NULL));
dest->data = gnutls_malloc(source->size);
CRM_ASSERT(dest->data);
memcpy(dest->data, source->data, source->size);
dest->size = source->size;
}
static void
clear_gnutls_datum(gnutls_datum_t *datum)
{
gnutls_free(datum->data);
datum->data = NULL;
datum->size = 0;
}
#define KEY_READ_LEN 256 // Chunk size for reading key from file
// \return Standard Pacemaker return code
static int
read_gnutls_key(const char *location, gnutls_datum_t *key)
{
FILE *stream = NULL;
size_t buf_len = KEY_READ_LEN;
if ((location == NULL) || (key == NULL)) {
return EINVAL;
}
stream = fopen(location, "r");
if (stream == NULL) {
return errno;
}
key->data = gnutls_malloc(buf_len);
key->size = 0;
while (!feof(stream)) {
int next = fgetc(stream);
if (next == EOF) {
if (!feof(stream)) {
crm_warn("Pacemaker Remote key read was partially successful "
"(copy in memory may be corrupted)");
}
break;
}
if (key->size == buf_len) {
buf_len = key->size + KEY_READ_LEN;
key->data = gnutls_realloc(key->data, buf_len);
CRM_ASSERT(key->data);
}
key->data[key->size++] = (unsigned char) next;
}
fclose(stream);
if (key->size == 0) {
clear_gnutls_datum(key);
return ENOKEY;
}
return pcmk_rc_ok;
}
// Cache the most recently used Pacemaker Remote authentication key
struct key_cache_s {
time_t updated; // When cached key was read (valid for 1 minute)
const char *location; // Where cached key was read from
gnutls_datum_t key; // Cached key
};
static bool
key_is_cached(struct key_cache_s *key_cache)
{
return key_cache->updated != 0;
}
static bool
key_cache_expired(struct key_cache_s *key_cache)
{
return (time(NULL) - key_cache->updated) >= 60;
}
static void
clear_key_cache(struct key_cache_s *key_cache)
{
clear_gnutls_datum(&(key_cache->key));
if ((key_cache->updated != 0) || (key_cache->location != NULL)) {
key_cache->updated = 0;
key_cache->location = NULL;
crm_debug("Cleared Pacemaker Remote key cache");
}
}
static void
get_cached_key(struct key_cache_s *key_cache, gnutls_datum_t *key)
{
copy_gnutls_datum(key, &(key_cache->key));
crm_debug("Using cached Pacemaker Remote key from %s",
pcmk__s(key_cache->location, "unknown location"));
}
static void
cache_key(struct key_cache_s *key_cache, gnutls_datum_t *key,
const char *location)
{
key_cache->updated = time(NULL);
key_cache->location = location;
copy_gnutls_datum(&(key_cache->key), key);
crm_debug("Using (and cacheing) Pacemaker Remote key from %s",
pcmk__s(location, "unknown location"));
}
/*!
* \internal
* \brief Get Pacemaker Remote authentication key from file or cache
*
* \param[in] location Path to key file to try (this memory must
* persist across all calls of this function)
* \param[out] key Key from location or cache
*
* \return Standard Pacemaker return code
*/
static int
get_remote_key(const char *location, gnutls_datum_t *key)
{
static struct key_cache_s key_cache = { 0, };
int rc = pcmk_rc_ok;
if ((location == NULL) || (key == NULL)) {
return EINVAL;
}
if (key_is_cached(&key_cache)) {
if (key_cache_expired(&key_cache)) {
clear_key_cache(&key_cache);
} else {
get_cached_key(&key_cache, key);
return pcmk_rc_ok;
}
}
rc = read_gnutls_key(location, key);
if (rc != pcmk_rc_ok) {
return rc;
}
cache_key(&key_cache, key, location);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Initialize the Pacemaker Remote authentication key
*
* Try loading the Pacemaker Remote authentication key from cache if available,
* otherwise from these locations, in order of preference: the value of the
* PCMK_authkey_location environment variable, if set; the Pacemaker default key
* file location; or (for historical reasons) /etc/corosync/authkey.
*
* \param[out] key Where to store key
*
* \return Standard Pacemaker return code
*/
int
lrmd__init_remote_key(gnutls_datum_t *key)
{
static const char *env_location = NULL;
static bool need_env = true;
int env_rc = pcmk_rc_ok;
int default_rc = pcmk_rc_ok;
int alt_rc = pcmk_rc_ok;
bool env_is_default = false;
bool env_is_fallback = false;
if (need_env) {
env_location = getenv("PCMK_authkey_location");
need_env = false;
}
// Try location in environment variable, if set
if (env_location != NULL) {
env_rc = get_remote_key(env_location, key);
if (env_rc == pcmk_rc_ok) {
return pcmk_rc_ok;
}
env_is_default = !strcmp(env_location, DEFAULT_REMOTE_KEY_LOCATION);
env_is_fallback = !strcmp(env_location, ALT_REMOTE_KEY_LOCATION);
/* @TODO It would be more secure to fail, rather than fall back to the
* default, if an explicitly set key location is not readable, and it
* would be better to never use the Corosync location as a fallback.
* However, that would break any deployments currently working with the
* fallbacks.
*/
}
// Try default location, if environment wasn't explicitly set to it
if (env_is_default) {
default_rc = env_rc;
} else {
default_rc = get_remote_key(DEFAULT_REMOTE_KEY_LOCATION, key);
}
// Try fallback location, if environment wasn't set to it and default failed
if (env_is_fallback) {
alt_rc = env_rc;
} else if (default_rc != pcmk_rc_ok) {
alt_rc = get_remote_key(ALT_REMOTE_KEY_LOCATION, key);
}
// We have all results, so log and return
if ((env_rc != pcmk_rc_ok) && (default_rc != pcmk_rc_ok)
&& (alt_rc != pcmk_rc_ok)) { // Environment set, everything failed
crm_warn("Could not read Pacemaker Remote key from %s (%s%s%s%s%s): %s",
env_location,
env_is_default? "" : "or default location ",
env_is_default? "" : DEFAULT_REMOTE_KEY_LOCATION,
!env_is_default && !env_is_fallback? " " : "",
env_is_fallback? "" : "or fallback location ",
env_is_fallback? "" : ALT_REMOTE_KEY_LOCATION,
pcmk_rc_str(env_rc));
return ENOKEY;
}
if (env_rc != pcmk_rc_ok) { // Environment set but failed, using a default
crm_warn("Could not read Pacemaker Remote key from %s "
"(using %s location %s instead): %s",
env_location,
(default_rc == pcmk_rc_ok)? "default" : "fallback",
(default_rc == pcmk_rc_ok)? DEFAULT_REMOTE_KEY_LOCATION : ALT_REMOTE_KEY_LOCATION,
pcmk_rc_str(env_rc));
return pcmk_rc_ok;
}
if ((default_rc != pcmk_rc_ok) && (alt_rc != pcmk_rc_ok)) {
// Environment unset, defaults failed
crm_warn("Could not read Pacemaker Remote key from default location %s"
" (or fallback location %s): %s",
DEFAULT_REMOTE_KEY_LOCATION, ALT_REMOTE_KEY_LOCATION,
pcmk_rc_str(default_rc));
return ENOKEY;
}
return pcmk_rc_ok; // Environment variable unset, a default worked
}
static void
lrmd_gnutls_global_init(void)
{
static int gnutls_init = 0;
if (!gnutls_init) {
crm_gnutls_global_init();
}
gnutls_init = 1;
}
#endif
static void
report_async_connection_result(lrmd_t * lrmd, int rc)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->callback) {
lrmd_event_data_t event = { 0, };
event.type = lrmd_event_connect;
event.remote_nodename = native->remote_nodename;
event.connection_rc = rc;
native->callback(&event);
}
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static inline int
lrmd__tls_client_handshake(pcmk__remote_t *remote)
{
return pcmk__tls_client_handshake(remote, LRMD_CLIENT_HANDSHAKE_TIMEOUT);
}
/*!
* \internal
* \brief Add trigger and file descriptor mainloop sources for TLS
*
* \param[in,out] lrmd API connection with established TLS session
* \param[in] do_handshake Whether to perform executor handshake
*
* \return Standard Pacemaker return code
*/
static int
add_tls_to_mainloop(lrmd_t *lrmd, bool do_handshake)
{
lrmd_private_t *native = lrmd->lrmd_private;
int rc = pcmk_rc_ok;
char *name = crm_strdup_printf("pacemaker-remote-%s:%d",
native->server, native->port);
struct mainloop_fd_callbacks tls_fd_callbacks = {
.dispatch = lrmd_tls_dispatch,
.destroy = lrmd_tls_connection_destroy,
};
native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH,
lrmd_tls_dispatch, lrmd);
native->source = mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd,
&tls_fd_callbacks);
/* Async connections lose the client name provided by the API caller, so we
* have to use our generated name here to perform the executor handshake.
*
* @TODO Keep track of the caller-provided name. Perhaps we should be using
* that name in this function instead of generating one anyway.
*/
if (do_handshake) {
rc = lrmd_handshake(lrmd, name);
rc = pcmk_legacy2rc(rc);
}
free(name);
return rc;
}
static void
lrmd_tcp_connect_cb(void *userdata, int rc, int sock)
{
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
gnutls_datum_t psk_key = { NULL, 0 };
native->async_timer = 0;
if (rc != pcmk_rc_ok) {
lrmd_tls_connection_destroy(lrmd);
crm_info("Could not connect to Pacemaker Remote at %s:%d: %s "
CRM_XS " rc=%d",
native->server, native->port, pcmk_rc_str(rc), rc);
report_async_connection_result(lrmd, pcmk_rc2legacy(rc));
return;
}
/* The TCP connection was successful, so establish the TLS connection.
* @TODO make this async to avoid blocking code in client
*/
native->sock = sock;
rc = lrmd__init_remote_key(&psk_key);
if (rc != pcmk_rc_ok) {
crm_info("Could not connect to Pacemaker Remote at %s:%d: %s "
CRM_XS " rc=%d",
native->server, native->port, pcmk_rc_str(rc), rc);
lrmd_tls_connection_destroy(lrmd);
report_async_connection_result(lrmd, pcmk_rc2legacy(rc));
return;
}
gnutls_psk_allocate_client_credentials(&native->psk_cred_c);
gnutls_psk_set_client_credentials(native->psk_cred_c, DEFAULT_REMOTE_USERNAME, &psk_key, GNUTLS_PSK_KEY_RAW);
gnutls_free(psk_key.data);
native->remote->tls_session = pcmk__new_tls_session(sock, GNUTLS_CLIENT,
GNUTLS_CRD_PSK,
native->psk_cred_c);
if (native->remote->tls_session == NULL) {
lrmd_tls_connection_destroy(lrmd);
report_async_connection_result(lrmd, -EPROTO);
return;
}
if (lrmd__tls_client_handshake(native->remote) != pcmk_rc_ok) {
crm_warn("Disconnecting after TLS handshake with Pacemaker Remote server %s:%d failed",
native->server, native->port);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
native->remote->tls_session = NULL;
lrmd_tls_connection_destroy(lrmd);
report_async_connection_result(lrmd, -EKEYREJECTED);
return;
}
crm_info("TLS connection to Pacemaker Remote server %s:%d succeeded",
native->server, native->port);
rc = add_tls_to_mainloop(lrmd, true);
report_async_connection_result(lrmd, pcmk_rc2legacy(rc));
}
static int
lrmd_tls_connect_async(lrmd_t * lrmd, int timeout /* ms */)
{
int rc;
int timer_id = 0;
lrmd_private_t *native = lrmd->lrmd_private;
lrmd_gnutls_global_init();
native->sock = -1;
rc = pcmk__connect_remote(native->server, native->port, timeout, &timer_id,
&(native->sock), lrmd, lrmd_tcp_connect_cb);
if (rc != pcmk_rc_ok) {
crm_warn("Pacemaker Remote connection to %s:%d failed: %s "
CRM_XS " rc=%d",
native->server, native->port, pcmk_rc_str(rc), rc);
return pcmk_rc2legacy(rc);
}
native->async_timer = timer_id;
return pcmk_ok;
}
static int
lrmd_tls_connect(lrmd_t * lrmd, int *fd)
{
int rc;
lrmd_private_t *native = lrmd->lrmd_private;
gnutls_datum_t psk_key = { NULL, 0 };
lrmd_gnutls_global_init();
native->sock = -1;
rc = pcmk__connect_remote(native->server, native->port, 0, NULL,
&(native->sock), NULL, NULL);
if (rc != pcmk_rc_ok) {
crm_warn("Pacemaker Remote connection to %s:%d failed: %s "
CRM_XS " rc=%d",
native->server, native->port, pcmk_rc_str(rc), rc);
lrmd_tls_connection_destroy(lrmd);
return -ENOTCONN;
}
rc = lrmd__init_remote_key(&psk_key);
if (rc != pcmk_rc_ok) {
lrmd_tls_connection_destroy(lrmd);
return pcmk_rc2legacy(rc);
}
gnutls_psk_allocate_client_credentials(&native->psk_cred_c);
    gnutls_psk_set_client_credentials(native->psk_cred_c,
                                      DEFAULT_REMOTE_USERNAME, &psk_key,
                                      GNUTLS_PSK_KEY_RAW);
gnutls_free(psk_key.data);
native->remote->tls_session = pcmk__new_tls_session(native->sock, GNUTLS_CLIENT,
GNUTLS_CRD_PSK,
native->psk_cred_c);
if (native->remote->tls_session == NULL) {
lrmd_tls_connection_destroy(lrmd);
return -EPROTO;
}
if (lrmd__tls_client_handshake(native->remote) != pcmk_rc_ok) {
crm_err("Session creation for %s:%d failed", native->server, native->port);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
native->remote->tls_session = NULL;
lrmd_tls_connection_destroy(lrmd);
return -EKEYREJECTED;
}
crm_info("Client TLS connection established with Pacemaker Remote server %s:%d", native->server,
native->port);
if (fd) {
*fd = native->sock;
} else {
add_tls_to_mainloop(lrmd, false);
}
return pcmk_ok;
}
#endif
static int
lrmd_api_connect(lrmd_t * lrmd, const char *name, int *fd)
{
int rc = -ENOTCONN;
lrmd_private_t *native = lrmd->lrmd_private;
switch (native->type) {
case pcmk__client_ipc:
rc = lrmd_ipc_connect(lrmd, fd);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
rc = lrmd_tls_connect(lrmd, fd);
break;
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
native->type);
rc = -EPROTONOSUPPORT;
}
if (rc == pcmk_ok) {
rc = lrmd_handshake(lrmd, name);
}
return rc;
}
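/* Usage sketch (not part of this file; assumes the public lrmd.h API, with
 * "example-client" as an arbitrary client name): a synchronous local
 * connection, checked with the legacy return-code convention used above.
 *
 *     lrmd_t *conn = lrmd_api_new();
 *     int rc = conn->cmds->connect(conn, "example-client", NULL);
 *
 *     if (rc != pcmk_ok) {
 *         crm_err("Could not connect to executor: %s", pcmk_strerror(rc));
 *     }
 *     ...
 *     conn->cmds->disconnect(conn);
 *     lrmd_api_delete(conn);
 */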
static int
lrmd_api_connect_async(lrmd_t * lrmd, const char *name, int timeout)
{
int rc = pcmk_ok;
lrmd_private_t *native = lrmd->lrmd_private;
CRM_CHECK(native && native->callback, return -EINVAL);
switch (native->type) {
case pcmk__client_ipc:
            /* Fake an asynchronous connection with IPC; it should be fast
             * enough that we gain very little from true asynchronicity
             */
rc = lrmd_api_connect(lrmd, name, NULL);
if (!rc) {
report_async_connection_result(lrmd, rc);
}
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
rc = lrmd_tls_connect_async(lrmd, timeout);
if (rc) {
/* connection failed, report rc now */
report_async_connection_result(lrmd, rc);
}
break;
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
native->type);
rc = -EPROTONOSUPPORT;
}
return rc;
}
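/* Usage sketch (not part of this file): an asynchronous connection. A result
 * callback must be set first, because the outcome is delivered as an
 * lrmd_event_connect event (see report_async_connection_result()) rather
 * than as the return value.
 *
 *     static void
 *     connect_cb(lrmd_event_data_t *event)
 *     {
 *         if (event->type == lrmd_event_connect) {
 *             crm_info("Async connect finished: rc=%d", event->connection_rc);
 *         }
 *     }
 *
 *     conn->cmds->set_callback(conn, connect_cb);
 *     conn->cmds->connect_async(conn, "example-client", 10000); // ms
 */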
static void
lrmd_ipc_disconnect(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->source != NULL) {
/* Attached to mainloop */
mainloop_del_ipc_client(native->source);
native->source = NULL;
native->ipc = NULL;
} else if (native->ipc) {
/* Not attached to mainloop */
crm_ipc_t *ipc = native->ipc;
native->ipc = NULL;
crm_ipc_close(ipc);
crm_ipc_destroy(ipc);
}
}
#ifdef HAVE_GNUTLS_GNUTLS_H
static void
lrmd_tls_disconnect(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->remote->tls_session) {
gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR);
gnutls_deinit(*native->remote->tls_session);
gnutls_free(native->remote->tls_session);
        native->remote->tls_session = NULL;
}
if (native->async_timer) {
g_source_remove(native->async_timer);
native->async_timer = 0;
}
if (native->source != NULL) {
/* Attached to mainloop */
mainloop_del_ipc_client(native->source);
native->source = NULL;
} else if (native->sock) {
close(native->sock);
native->sock = 0;
}
if (native->pending_notify) {
g_list_free_full(native->pending_notify, lrmd_free_xml);
native->pending_notify = NULL;
}
}
#endif
static int
lrmd_api_disconnect(lrmd_t * lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
int rc = pcmk_ok;
crm_info("Disconnecting %s %s executor connection",
pcmk__client_type_str(native->type),
(native->remote_nodename? native->remote_nodename : "local"));
switch (native->type) {
case pcmk__client_ipc:
lrmd_ipc_disconnect(lrmd);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
lrmd_tls_disconnect(lrmd);
break;
#endif
default:
crm_err("Unsupported executor connection type (bug?): %d",
native->type);
rc = -EPROTONOSUPPORT;
}
free(native->token);
native->token = NULL;
free(native->peer_version);
native->peer_version = NULL;
return rc;
}
static int
lrmd_api_register_rsc(lrmd_t * lrmd,
const char *rsc_id,
const char *class,
const char *provider, const char *type, enum lrmd_call_options options)
{
int rc = pcmk_ok;
xmlNode *data = NULL;
if (!class || !type || !rsc_id) {
return -EINVAL;
}
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)
&& (provider == NULL)) {
return -EINVAL;
}
data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
crm_xml_add(data, F_LRMD_CLASS, class);
crm_xml_add(data, F_LRMD_PROVIDER, provider);
crm_xml_add(data, F_LRMD_TYPE, type);
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_REG, data, NULL, 0, options, TRUE);
free_xml(data);
return rc;
}
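/* Usage sketch (not part of this file; resource name and agent are example
 * values): registering an OCF resource so actions can be executed on it.
 *
 *     rc = conn->cmds->register_rsc(conn, "example-ip", "ocf", "heartbeat",
 *                                   "IPaddr2", 0);
 */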
static int
lrmd_api_unregister_rsc(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_UNREG, data, NULL, 0, options, TRUE);
free_xml(data);
return rc;
}
lrmd_rsc_info_t *
lrmd_new_rsc_info(const char *rsc_id, const char *standard,
const char *provider, const char *type)
{
lrmd_rsc_info_t *rsc_info = calloc(1, sizeof(lrmd_rsc_info_t));
CRM_ASSERT(rsc_info);
pcmk__str_update(&rsc_info->id, rsc_id);
pcmk__str_update(&rsc_info->standard, standard);
pcmk__str_update(&rsc_info->provider, provider);
pcmk__str_update(&rsc_info->type, type);
return rsc_info;
}
lrmd_rsc_info_t *
lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info)
{
return lrmd_new_rsc_info(rsc_info->id, rsc_info->standard,
rsc_info->provider, rsc_info->type);
}
void
lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info)
{
if (!rsc_info) {
return;
}
free(rsc_info->id);
free(rsc_info->type);
free(rsc_info->standard);
free(rsc_info->provider);
free(rsc_info);
}
static lrmd_rsc_info_t *
lrmd_api_get_rsc_info(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options)
{
lrmd_rsc_info_t *rsc_info = NULL;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
xmlNode *output = NULL;
const char *class = NULL;
const char *provider = NULL;
const char *type = NULL;
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
lrmd_send_command(lrmd, LRMD_OP_RSC_INFO, data, &output, 0, options, TRUE);
free_xml(data);
if (!output) {
return NULL;
}
class = crm_element_value(output, F_LRMD_CLASS);
provider = crm_element_value(output, F_LRMD_PROVIDER);
type = crm_element_value(output, F_LRMD_TYPE);
if (!class || !type) {
free_xml(output);
return NULL;
} else if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)
&& !provider) {
free_xml(output);
return NULL;
}
rsc_info = lrmd_new_rsc_info(rsc_id, class, provider, type);
free_xml(output);
return rsc_info;
}
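/* Usage sketch (not part of this file): looking up a registered resource and
 * releasing the copy that get_rsc_info() returns.
 *
 *     lrmd_rsc_info_t *info = conn->cmds->get_rsc_info(conn, "example-ip", 0);
 *
 *     if (info != NULL) {
 *         crm_info("%s is %s:%s:%s", info->id, info->standard,
 *                  (info->provider? info->provider : ""), info->type);
 *         lrmd_free_rsc_info(info);
 *     }
 */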
void
lrmd_free_op_info(lrmd_op_info_t *op_info)
{
if (op_info) {
free(op_info->rsc_id);
free(op_info->action);
free(op_info->interval_ms_s);
free(op_info->timeout_ms_s);
free(op_info);
}
}
static int
lrmd_api_get_recurring_ops(lrmd_t *lrmd, const char *rsc_id, int timeout_ms,
enum lrmd_call_options options, GList **output)
{
xmlNode *data = NULL;
xmlNode *output_xml = NULL;
int rc = pcmk_ok;
if (output == NULL) {
return -EINVAL;
}
*output = NULL;
// Send request
if (rsc_id) {
data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
}
rc = lrmd_send_command(lrmd, LRMD_OP_GET_RECURRING, data, &output_xml,
timeout_ms, options, TRUE);
if (data) {
free_xml(data);
}
// Process reply
if ((rc != pcmk_ok) || (output_xml == NULL)) {
return rc;
}
for (xmlNode *rsc_xml = first_named_child(output_xml, F_LRMD_RSC);
(rsc_xml != NULL) && (rc == pcmk_ok);
rsc_xml = crm_next_same_xml(rsc_xml)) {
rsc_id = crm_element_value(rsc_xml, F_LRMD_RSC_ID);
if (rsc_id == NULL) {
crm_err("Could not parse recurring operation information from executor");
continue;
}
for (xmlNode *op_xml = first_named_child(rsc_xml, T_LRMD_RSC_OP);
op_xml != NULL; op_xml = crm_next_same_xml(op_xml)) {
lrmd_op_info_t *op_info = calloc(1, sizeof(lrmd_op_info_t));
if (op_info == NULL) {
rc = -ENOMEM;
break;
}
op_info->rsc_id = strdup(rsc_id);
op_info->action = crm_element_value_copy(op_xml, F_LRMD_RSC_ACTION);
op_info->interval_ms_s = crm_element_value_copy(op_xml,
F_LRMD_RSC_INTERVAL);
op_info->timeout_ms_s = crm_element_value_copy(op_xml,
F_LRMD_TIMEOUT);
*output = g_list_prepend(*output, op_info);
}
}
free_xml(output_xml);
return rc;
}
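/* Usage sketch (not part of this file): listing recurring operations for all
 * registered resources (rsc_id NULL) and freeing the result list.
 *
 *     GList *ops = NULL;
 *
 *     if (conn->cmds->get_recurring_ops(conn, NULL, 0, 0, &ops) == pcmk_ok) {
 *         for (GList *iter = ops; iter != NULL; iter = iter->next) {
 *             lrmd_op_info_t *op = iter->data;
 *
 *             crm_info("%s %s interval=%sms", op->rsc_id, op->action,
 *                      op->interval_ms_s);
 *         }
 *         g_list_free_full(ops, (GDestroyNotify) lrmd_free_op_info);
 *     }
 */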
static void
lrmd_api_set_callback(lrmd_t * lrmd, lrmd_event_callback callback)
{
lrmd_private_t *native = lrmd->lrmd_private;
native->callback = callback;
}
void
lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg))
{
lrmd_private_t *native = lrmd->lrmd_private;
native->proxy_callback = callback;
native->proxy_callback_userdata = userdata;
}
void
lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->proxy_callback) {
crm_log_xml_trace(msg, "PROXY_INBOUND");
native->proxy_callback(lrmd, native->proxy_callback_userdata, msg);
}
}
int
lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg)
{
if (lrmd == NULL) {
return -ENOTCONN;
}
crm_xml_add(msg, F_LRMD_OPERATION, CRM_OP_IPC_FWD);
crm_log_xml_trace(msg, "PROXY_OUTBOUND");
return lrmd_send_xml_no_reply(lrmd, msg);
}
static int
stonith_get_metadata(const char *provider, const char *type, char **output)
{
int rc = pcmk_ok;
stonith_t *stonith_api = stonith_api_new();
if (stonith_api == NULL) {
crm_err("Could not get fence agent meta-data: API memory allocation failed");
return -ENOMEM;
}
rc = stonith_api->cmds->metadata(stonith_api, st_opt_sync_call, type,
provider, output, 0);
if ((rc == pcmk_ok) && (*output == NULL)) {
rc = -EIO;
}
stonith_api->cmds->free(stonith_api);
return rc;
}
static int
lrmd_api_get_metadata(lrmd_t *lrmd, const char *standard, const char *provider,
const char *type, char **output,
enum lrmd_call_options options)
{
return lrmd->cmds->get_metadata_params(lrmd, standard, provider, type,
output, options, NULL);
}
static int
lrmd_api_get_metadata_params(lrmd_t *lrmd, const char *standard,
const char *provider, const char *type,
char **output, enum lrmd_call_options options,
lrmd_key_value_t *params)
{
svc_action_t *action = NULL;
GHashTable *params_table = NULL;
if (!standard || !type) {
lrmd_key_value_freeall(params);
return -EINVAL;
}
if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
lrmd_key_value_freeall(params);
return stonith_get_metadata(provider, type, output);
}
params_table = pcmk__strkey_table(free, free);
for (const lrmd_key_value_t *param = params; param; param = param->next) {
        g_hash_table_insert(params_table, strdup(param->key),
                            strdup(param->value));
}
action = services__create_resource_action(type, standard, provider, type,
CRMD_ACTION_METADATA, 0,
CRMD_METADATA_CALL_TIMEOUT,
params_table, 0);
lrmd_key_value_freeall(params);
if (action == NULL) {
return -ENOMEM;
}
if (action->rc != PCMK_OCF_UNKNOWN) {
services_action_free(action);
return -EINVAL;
}
if (!services_action_sync(action)) {
crm_err("Failed to retrieve meta-data for %s:%s:%s",
standard, provider, type);
services_action_free(action);
return -EIO;
}
if (!action->stdout_data) {
crm_err("Failed to receive meta-data for %s:%s:%s",
standard, provider, type);
services_action_free(action);
return -EIO;
}
*output = strdup(action->stdout_data);
services_action_free(action);
return pcmk_ok;
}
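/* Usage sketch (not part of this file; agent names are example values):
 * retrieving agent metadata synchronously. The caller frees the output.
 *
 *     char *xml_text = NULL;
 *
 *     rc = conn->cmds->get_metadata(conn, "ocf", "heartbeat", "IPaddr2",
 *                                   &xml_text, 0);
 *     if (rc == pcmk_ok) {
 *         // parse or display xml_text
 *         free(xml_text);
 *     }
 */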
static int
lrmd_api_exec(lrmd_t *lrmd, const char *rsc_id, const char *action,
const char *userdata, guint interval_ms,
int timeout, /* ms */
int start_delay, /* ms */
enum lrmd_call_options options, lrmd_key_value_t * params)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
lrmd_key_value_t *tmp = NULL;
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
crm_xml_add(data, F_LRMD_RSC_ACTION, action);
crm_xml_add(data, F_LRMD_RSC_USERDATA_STR, userdata);
crm_xml_add_ms(data, F_LRMD_RSC_INTERVAL, interval_ms);
crm_xml_add_int(data, F_LRMD_TIMEOUT, timeout);
crm_xml_add_int(data, F_LRMD_RSC_START_DELAY, start_delay);
for (tmp = params; tmp; tmp = tmp->next) {
hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args);
}
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_EXEC, data, NULL, timeout, options, TRUE);
free_xml(data);
lrmd_key_value_freeall(params);
return rc;
}
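/* Usage sketch (not part of this file; IDs and values are examples):
 * requesting a recurring monitor with one instance parameter. exec() consumes
 * the params list, and the result arrives at the event callback as an
 * lrmd_event_exec_complete event.
 *
 *     lrmd_key_value_t *params = NULL;
 *
 *     params = lrmd_key_value_add(params, "ip", "192.0.2.10");
 *     rc = conn->cmds->exec(conn, "example-ip", "monitor", NULL,
 *                           10000,  // interval (ms)
 *                           20000,  // timeout (ms)
 *                           0,      // start delay (ms)
 *                           0, params);
 */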
/* timeout is in ms */
static int
lrmd_api_exec_alert(lrmd_t *lrmd, const char *alert_id, const char *alert_path,
int timeout, lrmd_key_value_t *params)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_ALERT);
xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
lrmd_key_value_t *tmp = NULL;
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
crm_xml_add(data, F_LRMD_ALERT_ID, alert_id);
crm_xml_add(data, F_LRMD_ALERT_PATH, alert_path);
crm_xml_add_int(data, F_LRMD_TIMEOUT, timeout);
for (tmp = params; tmp; tmp = tmp->next) {
hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args);
}
rc = lrmd_send_command(lrmd, LRMD_OP_ALERT_EXEC, data, NULL, timeout,
lrmd_opt_notify_orig_only, TRUE);
free_xml(data);
lrmd_key_value_freeall(params);
return rc;
}
static int
lrmd_api_cancel(lrmd_t *lrmd, const char *rsc_id, const char *action,
guint interval_ms)
{
int rc = pcmk_ok;
xmlNode *data = create_xml_node(NULL, F_LRMD_RSC);
crm_xml_add(data, F_LRMD_ORIGIN, __func__);
crm_xml_add(data, F_LRMD_RSC_ACTION, action);
crm_xml_add(data, F_LRMD_RSC_ID, rsc_id);
crm_xml_add_ms(data, F_LRMD_RSC_INTERVAL, interval_ms);
rc = lrmd_send_command(lrmd, LRMD_OP_RSC_CANCEL, data, NULL, 0, 0, TRUE);
free_xml(data);
return rc;
}
static int
list_stonith_agents(lrmd_list_t ** resources)
{
int rc = 0;
stonith_t *stonith_api = stonith_api_new();
stonith_key_value_t *stonith_resources = NULL;
stonith_key_value_t *dIter = NULL;
if (stonith_api == NULL) {
crm_err("Could not list fence agents: API memory allocation failed");
return -ENOMEM;
}
stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL,
&stonith_resources, 0);
stonith_api->cmds->free(stonith_api);
for (dIter = stonith_resources; dIter; dIter = dIter->next) {
rc++;
if (resources) {
*resources = lrmd_list_add(*resources, dIter->value);
}
}
stonith_key_value_freeall(stonith_resources, 1, 0);
return rc;
}
static int
lrmd_api_list_agents(lrmd_t * lrmd, lrmd_list_t ** resources, const char *class,
const char *provider)
{
int rc = 0;
int stonith_count = 0; // Initially, whether to include stonith devices
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
stonith_count = 1;
} else {
GList *gIter = NULL;
GList *agents = resources_list_agents(class, provider);
for (gIter = agents; gIter != NULL; gIter = gIter->next) {
*resources = lrmd_list_add(*resources, (const char *)gIter->data);
rc++;
}
g_list_free_full(agents, free);
if (!class) {
stonith_count = 1;
}
}
if (stonith_count) {
// Now, if stonith devices are included, how many there are
stonith_count = list_stonith_agents(resources);
if (stonith_count > 0) {
rc += stonith_count;
}
}
if (rc == 0) {
crm_notice("No agents found for class %s", class);
rc = -EPROTONOSUPPORT;
}
return rc;
}
static bool
does_provider_have_agent(const char *agent, const char *provider, const char *class)
{
bool found = false;
GList *agents = NULL;
GList *gIter2 = NULL;
agents = resources_list_agents(class, provider);
for (gIter2 = agents; gIter2 != NULL; gIter2 = gIter2->next) {
if (pcmk__str_eq(agent, gIter2->data, pcmk__str_casei)) {
found = true;
}
}
g_list_free_full(agents, free);
return found;
}
static int
lrmd_api_list_ocf_providers(lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers)
{
int rc = pcmk_ok;
char *provider = NULL;
GList *ocf_providers = NULL;
GList *gIter = NULL;
ocf_providers = resources_list_providers(PCMK_RESOURCE_CLASS_OCF);
for (gIter = ocf_providers; gIter != NULL; gIter = gIter->next) {
provider = gIter->data;
if (!agent || does_provider_have_agent(agent, provider,
PCMK_RESOURCE_CLASS_OCF)) {
*providers = lrmd_list_add(*providers, (const char *)gIter->data);
rc++;
}
}
g_list_free_full(ocf_providers, free);
return rc;
}
static int
lrmd_api_list_standards(lrmd_t * lrmd, lrmd_list_t ** supported)
{
int rc = 0;
GList *standards = NULL;
GList *gIter = NULL;
standards = resources_list_standards();
for (gIter = standards; gIter != NULL; gIter = gIter->next) {
*supported = lrmd_list_add(*supported, (const char *)gIter->data);
rc++;
}
if (list_stonith_agents(NULL) > 0) {
*supported = lrmd_list_add(*supported, PCMK_RESOURCE_CLASS_STONITH);
rc++;
}
g_list_free_full(standards, free);
return rc;
}
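/* Usage sketch (not part of this file): enumerating supported standards and
 * freeing the returned list with lrmd_list_freeall().
 *
 *     lrmd_list_t *standards = NULL;
 *
 *     if (conn->cmds->list_standards(conn, &standards) > 0) {
 *         for (lrmd_list_t *iter = standards; iter != NULL;
 *              iter = iter->next) {
 *             crm_info("Supported standard: %s", iter->val);
 *         }
 *         lrmd_list_freeall(standards);
 *     }
 */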
/*!
* \internal
* \brief Create an executor API object
*
* \param[out] api Will be set to newly created API object (it is the
* caller's responsibility to free this value with
* lrmd_api_delete() if this function succeeds)
* \param[in] nodename If the object will be used for a remote connection,
 *                      the node name to use in the cluster for the remote executor
* \param[in] server If the object will be used for a remote connection,
* the resolvable host name to connect to
* \param[in] port If the object will be used for a remote connection,
* port number on \p server to connect to
*
* \return Standard Pacemaker return code
* \note If the caller leaves one of \p nodename or \p server NULL, the other's
* value will be used for both. If the caller leaves both NULL, an API
* object will be created for a local executor connection.
*/
int
lrmd__new(lrmd_t **api, const char *nodename, const char *server, int port)
{
lrmd_private_t *pvt = NULL;
if (api == NULL) {
return EINVAL;
}
*api = NULL;
// Allocate all memory needed
*api = calloc(1, sizeof(lrmd_t));
if (*api == NULL) {
return ENOMEM;
}
pvt = calloc(1, sizeof(lrmd_private_t));
if (pvt == NULL) {
lrmd_api_delete(*api);
*api = NULL;
return ENOMEM;
}
(*api)->lrmd_private = pvt;
// @TODO Do we need to do this for local connections?
pvt->remote = calloc(1, sizeof(pcmk__remote_t));
(*api)->cmds = calloc(1, sizeof(lrmd_api_operations_t));
if ((pvt->remote == NULL) || ((*api)->cmds == NULL)) {
lrmd_api_delete(*api);
*api = NULL;
return ENOMEM;
}
// Set methods
(*api)->cmds->connect = lrmd_api_connect;
(*api)->cmds->connect_async = lrmd_api_connect_async;
(*api)->cmds->is_connected = lrmd_api_is_connected;
(*api)->cmds->poke_connection = lrmd_api_poke_connection;
(*api)->cmds->disconnect = lrmd_api_disconnect;
(*api)->cmds->register_rsc = lrmd_api_register_rsc;
(*api)->cmds->unregister_rsc = lrmd_api_unregister_rsc;
(*api)->cmds->get_rsc_info = lrmd_api_get_rsc_info;
(*api)->cmds->get_recurring_ops = lrmd_api_get_recurring_ops;
(*api)->cmds->set_callback = lrmd_api_set_callback;
(*api)->cmds->get_metadata = lrmd_api_get_metadata;
(*api)->cmds->exec = lrmd_api_exec;
(*api)->cmds->cancel = lrmd_api_cancel;
(*api)->cmds->list_agents = lrmd_api_list_agents;
(*api)->cmds->list_ocf_providers = lrmd_api_list_ocf_providers;
(*api)->cmds->list_standards = lrmd_api_list_standards;
(*api)->cmds->exec_alert = lrmd_api_exec_alert;
(*api)->cmds->get_metadata_params = lrmd_api_get_metadata_params;
if ((nodename == NULL) && (server == NULL)) {
pvt->type = pcmk__client_ipc;
} else {
#ifdef HAVE_GNUTLS_GNUTLS_H
if (nodename == NULL) {
nodename = server;
} else if (server == NULL) {
server = nodename;
}
pvt->type = pcmk__client_tls;
pvt->remote_nodename = strdup(nodename);
pvt->server = strdup(server);
if ((pvt->remote_nodename == NULL) || (pvt->server == NULL)) {
lrmd_api_delete(*api);
*api = NULL;
return ENOMEM;
}
pvt->port = port;
if (pvt->port == 0) {
pvt->port = crm_default_remote_port();
}
#else
crm_err("Cannot communicate with Pacemaker Remote "
"because GnuTLS is not enabled for this build");
lrmd_api_delete(*api);
*api = NULL;
return EOPNOTSUPP;
#endif
}
return pcmk_rc_ok;
}
lrmd_t *
lrmd_api_new(void)
{
lrmd_t *api = NULL;
CRM_ASSERT(lrmd__new(&api, NULL, NULL, 0) == pcmk_rc_ok);
return api;
}
lrmd_t *
lrmd_remote_api_new(const char *nodename, const char *server, int port)
{
lrmd_t *api = NULL;
CRM_ASSERT(lrmd__new(&api, nodename, server, port) == pcmk_rc_ok);
return api;
}
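/* Usage sketch (not part of this file; host names are example values):
 * creating an API object for a Pacemaker Remote connection. Passing port 0
 * selects the default remote port (3121).
 *
 *     lrmd_t *remote = lrmd_remote_api_new("node1", "node1.example.com", 0);
 *     ...
 *     lrmd_api_delete(remote);
 */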
void
lrmd_api_delete(lrmd_t * lrmd)
{
if (lrmd == NULL) {
return;
}
if (lrmd->cmds != NULL) { // Never NULL, but make static analysis happy
if (lrmd->cmds->disconnect != NULL) { // Also never really NULL
lrmd->cmds->disconnect(lrmd); // No-op if already disconnected
}
free(lrmd->cmds);
}
if (lrmd->lrmd_private != NULL) {
lrmd_private_t *native = lrmd->lrmd_private;
#ifdef HAVE_GNUTLS_GNUTLS_H
free(native->server);
#endif
free(native->remote_nodename);
free(native->remote);
free(native->token);
free(native->peer_version);
free(lrmd->lrmd_private);
}
free(lrmd);
}
struct metadata_cb {
void (*callback)(int pid, const pcmk__action_result_t *result,
void *user_data);
void *user_data;
};
/*!
* \internal
* \brief Process asynchronous metadata completion
*
* \param[in,out] action Metadata action that completed
*/
static void
metadata_complete(svc_action_t *action)
{
struct metadata_cb *metadata_cb = (struct metadata_cb *) action->cb_data;
pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
pcmk__set_result(&result, action->rc, action->status,
services__exit_reason(action));
pcmk__set_result_output(&result, action->stdout_data, action->stderr_data);
metadata_cb->callback(0, &result, metadata_cb->user_data);
result.action_stdout = NULL; // Prevent free, because action owns it
result.action_stderr = NULL; // Prevent free, because action owns it
pcmk__reset_result(&result);
free(metadata_cb);
}
/*!
* \internal
* \brief Retrieve agent metadata asynchronously
*
* \param[in] rsc Resource agent specification
* \param[in] callback Function to call with result (this will always be
* called, whether by this function directly or later
* via the main loop, and on success the metadata will
* be in its result argument's action_stdout)
* \param[in,out] user_data User data to pass to callback
*
* \return Standard Pacemaker return code
 * \note This function is not an lrmd_api_operations_t method because it does
 *       not need an lrmd_t object and does not go through the executor, but
 *       executes the agent directly.
*/
int
lrmd__metadata_async(const lrmd_rsc_info_t *rsc,
void (*callback)(int pid,
const pcmk__action_result_t *result,
void *user_data),
void *user_data)
{
svc_action_t *action = NULL;
struct metadata_cb *metadata_cb = NULL;
pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
CRM_CHECK(callback != NULL, return EINVAL);
if ((rsc == NULL) || (rsc->standard == NULL) || (rsc->type == NULL)) {
pcmk__set_result(&result, PCMK_OCF_NOT_CONFIGURED,
PCMK_EXEC_ERROR_FATAL,
"Invalid resource specification");
callback(0, &result, user_data);
pcmk__reset_result(&result);
return EINVAL;
}
if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_STONITH) == 0) {
return stonith__metadata_async(rsc->type,
CRMD_METADATA_CALL_TIMEOUT / 1000,
callback, user_data);
}
action = services__create_resource_action(pcmk__s(rsc->id, rsc->type),
rsc->standard, rsc->provider,
rsc->type, CRMD_ACTION_METADATA,
0, CRMD_METADATA_CALL_TIMEOUT,
NULL, 0);
if (action == NULL) {
pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR,
"Out of memory");
callback(0, &result, user_data);
pcmk__reset_result(&result);
return ENOMEM;
}
if (action->rc != PCMK_OCF_UNKNOWN) {
pcmk__set_result(&result, action->rc, action->status,
services__exit_reason(action));
callback(0, &result, user_data);
pcmk__reset_result(&result);
services_action_free(action);
return EINVAL;
}
action->cb_data = calloc(1, sizeof(struct metadata_cb));
if (action->cb_data == NULL) {
services_action_free(action);
pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR,
"Out of memory");
callback(0, &result, user_data);
pcmk__reset_result(&result);
return ENOMEM;
}
metadata_cb = (struct metadata_cb *) action->cb_data;
metadata_cb->callback = callback;
metadata_cb->user_data = user_data;
if (!services_action_async(action, metadata_complete)) {
services_action_free(action);
return pcmk_rc_error; // @TODO Derive from action->rc and ->status
}
// The services library has taken responsibility for action
return pcmk_rc_ok;
}
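/* Usage sketch (not part of this file; resource values are examples):
 * fetching agent metadata asynchronously. The callback runs exactly once,
 * with the metadata in the result's action_stdout on success.
 *
 *     static void
 *     metadata_cb(int pid, const pcmk__action_result_t *result,
 *                 void *user_data)
 *     {
 *         if (result->execution_status == PCMK_EXEC_DONE) {
 *             crm_info("Metadata: %s", result->action_stdout);
 *         }
 *     }
 *
 *     lrmd_rsc_info_t *rsc = lrmd_new_rsc_info("example-ip", "ocf",
 *                                              "heartbeat", "IPaddr2");
 *
 *     rc = lrmd__metadata_async(rsc, metadata_cb, NULL);
 *     lrmd_free_rsc_info(rsc);
 */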
/*!
* \internal
* \brief Set the result of an executor event
*
* \param[in,out] event Executor event to set
* \param[in] rc OCF exit status of event
* \param[in] op_status Executor status of event
* \param[in] exit_reason Human-friendly description of event
*/
void
lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc, int op_status,
const char *exit_reason)
{
if (event == NULL) {
return;
}
event->rc = rc;
event->op_status = op_status;
pcmk__str_update((char **) &event->exit_reason, exit_reason);
}
/*!
* \internal
* \brief Clear an executor event's exit reason, output, and error output
*
* \param[in,out] event Executor event to reset
*/
void
lrmd__reset_result(lrmd_event_data_t *event)
{
if (event == NULL) {
return;
}
free((void *) event->exit_reason);
event->exit_reason = NULL;
free((void *) event->output);
event->output = NULL;
}
/*!
* \internal
* \brief Get the uptime of a remote resource connection
*
* When the cluster connects to a remote resource, part of that resource's
* handshake includes the uptime of the remote resource's connection. This
* uptime is stored in the lrmd_t object.
 *
 * \param[in] lrmd Executor connection to check
 *
 * \return The connection's uptime in seconds, or -1 if unknown
*/
time_t
lrmd__uptime(lrmd_t *lrmd)
{
lrmd_private_t *native = lrmd->lrmd_private;
if (native->remote == NULL) {
return -1;
} else {
return native->remote->uptime;
}
}
+
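+/*!
+ * \internal
+ * \brief Get the start state of a remote node
+ *
+ * When the cluster connects to a remote node, the remote node reports the
+ * start state it was given as part of the handshake, and it is stored in the
+ * lrmd_t object.
+ *
+ * \param[in] lrmd Executor connection to check
+ *
+ * \return The remote node's start state, or NULL if unknown
+ */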
+const char *
+lrmd__node_start_state(lrmd_t *lrmd)
+{
+ lrmd_private_t *native = lrmd->lrmd_private;
+
+ if (native->remote == NULL) {
+ return NULL;
+ } else {
+ return native->remote->start_state;
+ }
+}
