diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c
index e2a0d50179..47c6d6eb79 100644
--- a/daemons/controld/controld_cib.c
+++ b/daemons/controld/controld_cib.c
@@ -1,1061 +1,1061 @@
/*
* Copyright 2004-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <unistd.h> /* sleep */
#include <crm/common/alerts_internal.h>
#include <crm/common/xml.h>
#include <crm/crm.h>
#include <crm/lrmd_internal.h>
#include <pacemaker-controld.h>
// Call ID of the most recent in-progress CIB resource update (or 0 if none)
static int pending_rsc_update = 0;
/*!
* \internal
* \brief Respond to a dropped CIB connection
*
* \param[in] user_data CIB connection that dropped
*/
static void
handle_cib_disconnect(gpointer user_data)
{
CRM_LOG_ASSERT(user_data == controld_globals.cib_conn);
controld_trigger_fsa();
controld_globals.cib_conn->state = cib_disconnected;
if (pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) {
// @TODO This should trigger a reconnect, not a shutdown
crm_crit("Lost connection to the CIB manager, shutting down");
register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
controld_clear_fsa_input_flags(R_CIB_CONNECTED);
} else { // Expected
crm_info("Disconnected from the CIB manager");
}
}
static void
do_cib_updated(const char *event, xmlNode * msg)
{
const xmlNode *patchset = NULL;
const char *client_name = NULL;
crm_debug("Received CIB diff notification: DC=%s", pcmk__btoa(AM_I_DC));
if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
return;
}
if (pcmk__cib_element_in_patchset(patchset, PCMK_XE_ALERTS)
|| pcmk__cib_element_in_patchset(patchset, PCMK_XE_CRM_CONFIG)) {
controld_trigger_config();
}
if (!AM_I_DC) {
// We're not in control of the join sequence
return;
}
client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME);
if (!cib__client_triggers_refresh(client_name)) {
// The CIB is still accurate
return;
}
if (pcmk__cib_element_in_patchset(patchset, PCMK_XE_NODES)
|| pcmk__cib_element_in_patchset(patchset, PCMK_XE_STATUS)) {
/* An unsafe client modified the PCMK_XE_NODES or PCMK_XE_STATUS
* section. Ensure the node list is up-to-date, and start the join
* process again so we get everyone's current resource history.
*/
if (client_name == NULL) {
client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTID);
}
crm_notice("Populating nodes and starting an election after %s event "
"triggered by %s",
event, pcmk__s(client_name, "(unidentified client)"));
populate_cib_nodes(controld_node_update_quick|controld_node_update_all,
__func__);
register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL);
}
}
void
controld_disconnect_cib_manager(void)
{
cib_t *cib_conn = controld_globals.cib_conn;
pcmk__assert(cib_conn != NULL);
crm_debug("Disconnecting from the CIB manager");
controld_clear_fsa_input_flags(R_CIB_CONNECTED);
cib_conn->cmds->del_notify_callback(cib_conn, PCMK__VALUE_CIB_DIFF_NOTIFY,
do_cib_updated);
cib_free_callbacks(cib_conn);
if (cib_conn->state != cib_disconnected) {
cib_conn->cmds->set_secondary(cib_conn, cib_discard_reply);
cib_conn->cmds->signoff(cib_conn);
}
}
/* A_CIB_STOP, A_CIB_START, O_CIB_RESTART */
void
do_cib_control(long long action,
enum crmd_fsa_cause cause,
enum crmd_fsa_state cur_state,
enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
static int cib_retries = 0;
cib_t *cib_conn = controld_globals.cib_conn;
void (*dnotify_fn) (gpointer user_data) = handle_cib_disconnect;
void (*update_cb) (const char *event, xmlNodePtr msg) = do_cib_updated;
int rc = pcmk_ok;
pcmk__assert(cib_conn != NULL);
if (pcmk_is_set(action, A_CIB_STOP)) {
if ((cib_conn->state != cib_disconnected)
&& (pending_rsc_update != 0)) {
crm_info("Waiting for resource update %d to complete",
pending_rsc_update);
crmd_fsa_stall(FALSE);
return;
}
controld_disconnect_cib_manager();
}
if (!pcmk_is_set(action, A_CIB_START)) {
return;
}
if (cur_state == S_STOPPING) {
crm_err("Ignoring request to connect to the CIB manager after "
"shutdown");
return;
}
rc = cib_conn->cmds->signon(cib_conn, crm_system_name,
cib_command_nonblocking);
if (rc != pcmk_ok) {
// A short wait that usually avoids stalling the FSA
sleep(1);
rc = cib_conn->cmds->signon(cib_conn, crm_system_name,
cib_command_nonblocking);
}
if (rc != pcmk_ok) {
crm_info("Could not connect to the CIB manager: %s", pcmk_strerror(rc));
} else if (cib_conn->cmds->set_connection_dnotify(cib_conn,
dnotify_fn) != pcmk_ok) {
crm_err("Could not set dnotify callback");
} else if (cib_conn->cmds->add_notify_callback(cib_conn,
PCMK__VALUE_CIB_DIFF_NOTIFY,
update_cb) != pcmk_ok) {
crm_err("Could not set CIB notification callback (update)");
} else {
controld_set_fsa_input_flags(R_CIB_CONNECTED);
cib_retries = 0;
}
if (!pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) {
cib_retries++;
if (cib_retries < 30) {
crm_warn("Couldn't complete CIB registration %d times... "
"pause and retry", cib_retries);
controld_start_wait_timer();
crmd_fsa_stall(FALSE);
} else {
crm_err("Could not complete CIB registration %d times... "
"hard error", cib_retries);
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
}
}
#define MIN_CIB_OP_TIMEOUT (30)
/*!
* \internal
* \brief Get the timeout (in seconds) that should be used with CIB operations
*
* \return The maximum of 30 seconds or 10 seconds times one more than the
* number of active cluster and remote nodes
*/
unsigned int
cib_op_timeout(void)
{
unsigned int calculated_timeout = 10U * (pcmk__cluster_num_active_nodes()
+ pcmk__cluster_num_remote_nodes()
+ 1U);
calculated_timeout = QB_MAX(calculated_timeout, MIN_CIB_OP_TIMEOUT);
crm_trace("Calculated timeout: %s",
pcmk__readable_interval(calculated_timeout * 1000));
if (controld_globals.cib_conn) {
controld_globals.cib_conn->call_timeout = calculated_timeout;
}
return calculated_timeout;
}
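/* Worked example (illustration only, not part of the original source): with 3
 * active cluster nodes and 1 Pacemaker Remote node, the calculation above
 * gives 10 * (3 + 1 + 1) = 50 seconds; with a single active node and no
 * remote nodes it gives 10 * (1 + 0 + 1) = 20, which is raised to the
 * 30-second minimum.
 */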
/*!
* \internal
* \brief Get CIB call options to use local scope if primary is unavailable
*
* \return CIB call options
*/
int
crmd_cib_smart_opt(void)
{
int call_opt = cib_none;
if ((controld_globals.fsa_state == S_ELECTION)
|| (controld_globals.fsa_state == S_PENDING)) {
crm_info("Sending update to local CIB in state: %s",
fsa_state2string(controld_globals.fsa_state));
cib__set_call_options(call_opt, "update", cib_none);
}
return call_opt;
}
static void
cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output,
void *user_data)
{
char *desc = user_data;
if (rc == 0) {
crm_debug("Deletion of %s (via CIB call %d) succeeded", desc, call_id);
} else {
crm_warn("Deletion of %s (via CIB call %d) failed: %s " QB_XS " rc=%d",
desc, call_id, pcmk_strerror(rc), rc);
}
}
// Searches for various portions of PCMK__XE_NODE_STATE to delete
// Match a particular node's PCMK__XE_NODE_STATE (takes node name 1x)
#define XPATH_NODE_STATE "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
// Node's lrm section (name 1x)
#define XPATH_NODE_LRM XPATH_NODE_STATE "/" PCMK__XE_LRM
/* Node's PCMK__XE_LRM_RSC_OP entries and PCMK__XE_LRM_RESOURCE entries without
* unexpired lock
* (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x)
*/
#define XPATH_NODE_LRM_UNLOCKED XPATH_NODE_STATE "//" PCMK__XE_LRM_RSC_OP \
"|" XPATH_NODE_STATE \
"//" PCMK__XE_LRM_RESOURCE \
"[not(@" PCMK_OPT_SHUTDOWN_LOCK ") " \
"or " PCMK_OPT_SHUTDOWN_LOCK "<%lld]"
// Node's PCMK__XE_TRANSIENT_ATTRIBUTES section (name 1x)
#define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" PCMK__XE_TRANSIENT_ATTRIBUTES
// Everything under PCMK__XE_NODE_STATE (name 1x)
#define XPATH_NODE_ALL XPATH_NODE_STATE "/*"
/* Unlocked history + transient attributes
* (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x, name 1x)
*/
#define XPATH_NODE_ALL_UNLOCKED XPATH_NODE_LRM_UNLOCKED "|" XPATH_NODE_ATTRS
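/* Illustrative expansions (assuming the usual token values, e.g.
 * PCMK__XE_NODE_STATE = "node_state", PCMK_XA_UNAME = "uname",
 * PCMK__XE_LRM = "lrm", PCMK__XE_TRANSIENT_ATTRIBUTES = "transient_attributes"):
 * for node "node1", XPATH_NODE_LRM becomes roughly
 *   //node_state[@uname='node1']/lrm
 * and XPATH_NODE_ATTRS becomes
 *   //node_state[@uname='node1']/transient_attributes
 */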
/*!
* \internal
* \brief Get the XPath and description of a node state section to be deleted
*
* \param[in] uname Desired node
* \param[in] section Subsection of \c PCMK__XE_NODE_STATE to be deleted
* \param[out] xpath Where to store XPath of \p section
* \param[out] desc If not \c NULL, where to store description of \p section
*/
void
controld_node_state_deletion_strings(const char *uname,
enum controld_section_e section,
char **xpath, char **desc)
{
const char *desc_pre = NULL;
// Shutdown locks that started before this time are expired
long long expire = (long long) time(NULL)
- controld_globals.shutdown_lock_limit;
switch (section) {
case controld_section_lrm:
*xpath = crm_strdup_printf(XPATH_NODE_LRM, uname);
desc_pre = "resource history";
break;
case controld_section_lrm_unlocked:
*xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED,
uname, uname, expire);
desc_pre = "resource history (other than shutdown locks)";
break;
case controld_section_attrs:
*xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname);
desc_pre = "transient attributes";
break;
case controld_section_all:
*xpath = crm_strdup_printf(XPATH_NODE_ALL, uname);
desc_pre = "all state";
break;
case controld_section_all_unlocked:
*xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED,
uname, uname, expire, uname);
desc_pre = "all state (other than shutdown locks)";
break;
default:
// We called this function incorrectly
pcmk__assert(false);
break;
}
if (desc != NULL) {
*desc = crm_strdup_printf("%s for node %s", desc_pre, uname);
}
}
/*!
* \internal
* \brief Delete subsection of a node's CIB \c PCMK__XE_NODE_STATE
*
* \param[in] uname Desired node
* \param[in] section Subsection of \c PCMK__XE_NODE_STATE to delete
* \param[in] options CIB call options to use
*/
void
controld_delete_node_state(const char *uname, enum controld_section_e section,
int options)
{
cib_t *cib = controld_globals.cib_conn;
char *xpath = NULL;
char *desc = NULL;
int cib_rc = pcmk_ok;
pcmk__assert((uname != NULL) && (cib != NULL));
controld_node_state_deletion_strings(uname, section, &xpath, &desc);
cib__set_call_options(options, "node state deletion",
cib_xpath|cib_multiple);
cib_rc = cib->cmds->remove(cib, xpath, NULL, options);
fsa_register_cib_callback(cib_rc, desc, cib_delete_callback);
crm_info("Deleting %s (via CIB call %d) " QB_XS " xpath=%s",
desc, cib_rc, xpath);
// CIB library handles freeing desc
free(xpath);
}
// Takes node name and resource ID
#define XPATH_RESOURCE_HISTORY "//" PCMK__XE_NODE_STATE \
"[@" PCMK_XA_UNAME "='%s']/" \
PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES \
"/" PCMK__XE_LRM_RESOURCE \
"[@" PCMK_XA_ID "='%s']"
// @TODO could add "and @PCMK_OPT_SHUTDOWN_LOCK" to limit to locks
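/* Illustrative expansion (assuming the usual token values): for node "node1"
 * and resource "my-rsc", XPATH_RESOURCE_HISTORY matches roughly
 *   //node_state[@uname='node1']/lrm/lrm_resources/lrm_resource[@id='my-rsc']
 */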
/*!
* \internal
* \brief Clear resource history from CIB for a given resource and node
*
* \param[in] rsc_id ID of resource to be cleared
* \param[in] node Node whose resource history should be cleared
* \param[in] user_name ACL user name to use
* \param[in] call_options CIB call options
*
* \return Standard Pacemaker return code
*/
int
controld_delete_resource_history(const char *rsc_id, const char *node,
const char *user_name, int call_options)
{
char *desc = NULL;
char *xpath = NULL;
int rc = pcmk_rc_ok;
cib_t *cib = controld_globals.cib_conn;
CRM_CHECK((rsc_id != NULL) && (node != NULL), return EINVAL);
desc = crm_strdup_printf("resource history for %s on %s", rsc_id, node);
if (cib == NULL) {
crm_err("Unable to clear %s: no CIB connection", desc);
free(desc);
return ENOTCONN;
}
// Ask CIB to delete the entry
xpath = crm_strdup_printf(XPATH_RESOURCE_HISTORY, node, rsc_id);
cib->cmds->set_user(cib, user_name);
rc = cib->cmds->remove(cib, xpath, NULL, call_options|cib_xpath);
cib->cmds->set_user(cib, NULL);
if (rc < 0) {
rc = pcmk_legacy2rc(rc);
crm_err("Could not delete resource status of %s on %s%s%s: %s "
QB_XS " rc=%d", rsc_id, node,
(user_name? " for user " : ""), (user_name? user_name : ""),
pcmk_rc_str(rc), rc);
free(desc);
free(xpath);
return rc;
}
if (pcmk_is_set(call_options, cib_sync_call)) {
if (pcmk_is_set(call_options, cib_dryrun)) {
crm_debug("Deletion of %s would succeed", desc);
} else {
crm_debug("Deletion of %s succeeded", desc);
}
free(desc);
} else {
crm_info("Clearing %s (via CIB call %d) " QB_XS " xpath=%s",
desc, rc, xpath);
fsa_register_cib_callback(rc, desc, cib_delete_callback);
// CIB library handles freeing desc
}
free(xpath);
return pcmk_rc_ok;
}
/*!
* \internal
* \brief Build XML and string of parameters meeting some criteria, for digest
*
* \param[in] op Executor event with parameter table to use
* \param[in] metadata Parsed meta-data for executed resource agent
* \param[in] param_type Flag used for selection criteria
* \param[out] result Will be set to newly created XML with selected
* parameters as attributes
*
* \return Newly allocated space-separated string of parameter names
* \note Selection criteria vary by param_type: for the restart digest, we
* want parameters that are *not* marked reloadable (OCF 1.1) or that
* *are* marked unique (pre-1.1), for both string and XML results; for the
* secure digest, we want parameters that *are* marked private for the
* string, but parameters that are *not* marked private for the XML.
* \note It is the caller's responsibility to free the string return value with
* \p g_string_free() and the XML result with \p pcmk__xml_free().
*/
static GString *
build_parameter_list(const lrmd_event_data_t *op,
const struct ra_metadata_s *metadata,
enum ra_param_flags_e param_type, xmlNode **result)
{
GString *list = NULL;
*result = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
/* Consider all parameters except private ones, to be consistent with what
 * the scheduler does in calculate_secure_digest().
 */
if (param_type == ra_param_private
&& compare_version(controld_globals.dc_version, "3.16.0") >= 0) {
g_hash_table_foreach(op->params, hash2field, *result);
pcmk__filter_op_for_digest(*result);
}
for (GList *iter = metadata->ra_params; iter != NULL; iter = iter->next) {
struct ra_param_s *param = (struct ra_param_s *) iter->data;
bool accept_for_list = false;
bool accept_for_xml = false;
switch (param_type) {
case ra_param_reloadable:
accept_for_list = !pcmk_is_set(param->rap_flags, param_type);
accept_for_xml = accept_for_list;
break;
case ra_param_unique:
accept_for_list = pcmk_is_set(param->rap_flags, param_type);
accept_for_xml = accept_for_list;
break;
case ra_param_private:
accept_for_list = pcmk_is_set(param->rap_flags, param_type);
accept_for_xml = !accept_for_list;
break;
}
if (accept_for_list) {
crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type));
if (list == NULL) {
// We will later search for " WORD ", so start list with a space
pcmk__add_word(&list, 256, " ");
}
pcmk__add_word(&list, 0, param->rap_name);
} else {
crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type));
}
if (accept_for_xml) {
const char *v = g_hash_table_lookup(op->params, param->rap_name);
if (v != NULL) {
crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v);
crm_xml_add(*result, param->rap_name, v);
}
} else {
crm_trace("Removing attr %s from the xml result", param->rap_name);
pcmk__xe_remove_attr(*result, param->rap_name);
}
}
if (list != NULL) {
// We will later search for " WORD ", so end list with a space
pcmk__add_word(&list, 0, " ");
}
return list;
}
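/* For illustration (hypothetical parameter names): if "ip" and "nic" are
 * accepted for the list, the returned string holds both names separated and
 * padded by spaces so callers can search for " ip " or " nic ", while *result
 * is a <parameters> element whose attributes are the name/value pairs accepted
 * for XML.
 */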
static void
append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
xmlNode *update, const char *version)
{
GString *list = NULL;
char *digest = NULL;
xmlNode *restart = NULL;
CRM_LOG_ASSERT(op->params != NULL);
if (op->interval_ms > 0) {
/* monitors are not reloadable */
return;
}
if (pcmk_is_set(metadata->ra_flags, ra_supports_reload_agent)) {
/* Add parameters not marked reloadable to the PCMK__XA_OP_FORCE_RESTART
* list
*/
list = build_parameter_list(op, metadata, ra_param_reloadable,
&restart);
} else if (pcmk_is_set(metadata->ra_flags, ra_supports_legacy_reload)) {
/* @COMPAT pre-OCF-1.1 resource agents
*
* Before OCF 1.1, Pacemaker abused "unique=0" to indicate
* reloadability. Add any parameters with unique="1" to the
* PCMK__XA_OP_FORCE_RESTART list.
*/
list = build_parameter_list(op, metadata, ra_param_unique, &restart);
} else {
// Resource does not support agent reloads
return;
}
- digest = pcmk__digest_operation(restart);
+ digest = pcmk__digest_op_params(restart);
/* Add PCMK__XA_OP_FORCE_RESTART and PCMK__XA_OP_RESTART_DIGEST to indicate
* the resource supports reload, no matter if it actually supports any
* reloadable parameters
*/
crm_xml_add(update, PCMK__XA_OP_FORCE_RESTART,
(list == NULL)? "" : (const char *) list->str);
crm_xml_add(update, PCMK__XA_OP_RESTART_DIGEST, digest);
if ((list != NULL) && (list->len > 0)) {
crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str);
} else {
crm_trace("%s: %s", op->rsc_id, digest);
}
if (list != NULL) {
g_string_free(list, TRUE);
}
pcmk__xml_free(restart);
free(digest);
}
static void
append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
xmlNode *update, const char *version)
{
GString *list = NULL;
char *digest = NULL;
xmlNode *secure = NULL;
CRM_LOG_ASSERT(op->params != NULL);
/* To keep PCMK__XA_OP_SECURE_PARAMS short, we want it to contain the secure
* parameters but PCMK__XA_OP_SECURE_DIGEST to be based on the insecure ones
*/
list = build_parameter_list(op, metadata, ra_param_private, &secure);
if (list != NULL) {
- digest = pcmk__digest_operation(secure);
+ digest = pcmk__digest_op_params(secure);
crm_xml_add(update, PCMK__XA_OP_SECURE_PARAMS,
(const char *) list->str);
crm_xml_add(update, PCMK__XA_OP_SECURE_DIGEST, digest);
crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str);
g_string_free(list, TRUE);
} else {
crm_trace("%s: no secure parameters", op->rsc_id);
}
pcmk__xml_free(secure);
free(digest);
}
/*!
* \internal
* \brief Create XML for a resource history entry
*
* \param[in] func Function name of caller
* \param[in,out] parent XML to add entry to
* \param[in] rsc Affected resource
* \param[in,out] op Action to add an entry for (or NULL to do nothing)
* \param[in] node_name Node where action occurred
*/
void
controld_add_resource_history_xml_as(const char *func, xmlNode *parent,
const lrmd_rsc_info_t *rsc,
lrmd_event_data_t *op,
const char *node_name)
{
int target_rc = 0;
xmlNode *xml_op = NULL;
struct ra_metadata_s *metadata = NULL;
const char *caller_version = NULL;
lrm_state_t *lrm_state = NULL;
if (op == NULL) {
return;
}
target_rc = rsc_op_expected_rc(op);
caller_version = g_hash_table_lookup(op->params, PCMK_XA_CRM_FEATURE_SET);
CRM_CHECK(caller_version != NULL, caller_version = CRM_FEATURE_SET);
xml_op = pcmk__create_history_xml(parent, op, caller_version, target_rc,
controld_globals.cluster->priv->node_name,
func);
if (xml_op == NULL) {
return;
}
if ((rsc == NULL) || (op->params == NULL)
|| !crm_op_needs_metadata(rsc->standard, op->op_type)) {
crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)",
op->op_type, op->rsc_id, op->params, rsc);
return;
}
lrm_state = controld_get_executor_state(node_name, false);
if (lrm_state == NULL) {
crm_warn("Cannot calculate digests for operation " PCMK__OP_FMT
" because we have no connection to executor for %s",
op->rsc_id, op->op_type, op->interval_ms, node_name);
return;
}
/* Ideally the metadata is cached, and the agent is just a fallback.
*
* @TODO Go through all callers and ensure they get metadata asynchronously
* first.
*/
metadata = controld_get_rsc_metadata(lrm_state, rsc,
controld_metadata_from_agent
|controld_metadata_from_cache);
if (metadata == NULL) {
return;
}
crm_trace("Including additional digests for %s:%s:%s",
rsc->standard, rsc->provider, rsc->type);
append_restart_list(op, metadata, xml_op, caller_version);
append_secure_list(op, metadata, xml_op, caller_version);
return;
}
/*!
* \internal
* \brief Record an action as pending in the CIB, if appropriate
*
* \param[in] node_name Node where the action is pending
* \param[in] rsc Resource that action is for
* \param[in,out] op Pending action
*
* \return true if action was recorded in CIB, otherwise false
*/
bool
controld_record_pending_op(const char *node_name, const lrmd_rsc_info_t *rsc,
lrmd_event_data_t *op)
{
const char *record_pending = NULL;
CRM_CHECK((node_name != NULL) && (rsc != NULL) && (op != NULL),
return false);
// Never record certain operation types as pending
if ((op->op_type == NULL) || (op->params == NULL)
|| !controld_action_is_recordable(op->op_type)) {
return false;
}
// Check action's PCMK_META_RECORD_PENDING meta-attribute (defaults to true)
record_pending = crm_meta_value(op->params, PCMK_META_RECORD_PENDING);
if ((record_pending != NULL) && !crm_is_true(record_pending)) {
pcmk__warn_once(pcmk__wo_record_pending,
"The " PCMK_META_RECORD_PENDING " option (for example, "
"for the %s resource's %s operation) is deprecated and "
"will be removed in a future release",
rsc->id, op->op_type);
return false;
}
op->call_id = -1;
op->t_run = time(NULL);
op->t_rcchange = op->t_run;
lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL);
crm_debug("Recording pending %s-interval %s for %s on %s in the CIB",
pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
node_name);
controld_update_resource_history(node_name, rsc, op, 0);
return true;
}
static void
cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
switch (rc) {
case pcmk_ok:
case -pcmk_err_diff_failed:
case -pcmk_err_diff_resync:
crm_trace("Resource history update completed (call=%d rc=%d)",
call_id, rc);
break;
default:
if (call_id > 0) {
crm_warn("Resource history update %d failed: %s "
QB_XS " rc=%d", call_id, pcmk_strerror(rc), rc);
} else {
crm_warn("Resource history update failed: %s " QB_XS " rc=%d",
pcmk_strerror(rc), rc);
}
}
if (call_id == pending_rsc_update) {
pending_rsc_update = 0;
controld_trigger_fsa();
}
}
/* Only successful stops, and probes that found the resource inactive, get locks
* recorded in the history. This ensures the resource stays locked to the node
* until it is active there again after the node comes back up.
*/
static bool
should_preserve_lock(lrmd_event_data_t *op)
{
if (!pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
return false;
}
if (!strcmp(op->op_type, PCMK_ACTION_STOP) && (op->rc == PCMK_OCF_OK)) {
return true;
}
if (!strcmp(op->op_type, PCMK_ACTION_MONITOR)
&& (op->rc == PCMK_OCF_NOT_RUNNING)) {
return true;
}
return false;
}
/*!
* \internal
* \brief Request a CIB update
*
* \param[in] section Section of CIB to update
* \param[in] data New XML of CIB section to update
* \param[in] options CIB call options
* \param[in] callback If not \c NULL, set this as the operation callback
*
* \return Standard Pacemaker return code
*
* \note If \p callback is \p cib_rsc_callback(), the CIB update's call ID is
* stored in \p pending_rsc_update on success.
*/
int
controld_update_cib(const char *section, xmlNode *data, int options,
void (*callback)(xmlNode *, int, int, xmlNode *, void *))
{
cib_t *cib = controld_globals.cib_conn;
int cib_rc = -ENOTCONN;
pcmk__assert(data != NULL);
if (cib != NULL) {
cib_rc = cib->cmds->modify(cib, section, data, options);
if (cib_rc >= 0) {
crm_debug("Submitted CIB update %d for %s section",
cib_rc, section);
}
}
if (callback == NULL) {
if (cib_rc < 0) {
crm_err("Failed to update CIB %s section: %s",
section, pcmk_rc_str(pcmk_legacy2rc(cib_rc)));
}
} else {
if ((cib_rc >= 0) && (callback == cib_rsc_callback)) {
/* Checking for a particular callback is a little hacky, but it
* didn't seem worth adding an output argument for cib_rc for just
* one use case.
*/
pending_rsc_update = cib_rc;
}
fsa_register_cib_callback(cib_rc, NULL, callback);
}
return (cib_rc >= 0)? pcmk_rc_ok : pcmk_legacy2rc(cib_rc);
}
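/* A minimal usage sketch (hypothetical call, not taken from this file's
 * callers): with a NULL callback any failure is only logged here, e.g.
 *   controld_update_cib(PCMK_XE_STATUS, update, cib_none, NULL);
 * while passing cib_rsc_callback additionally stores the call ID in
 * pending_rsc_update so cib_rsc_callback() can clear it when the update
 * completes.
 */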
/*!
* \internal
* \brief Update resource history entry in CIB
*
* \param[in] node_name Node where action occurred
* \param[in] rsc Resource that action is for
* \param[in,out] op Action to record
* \param[in] lock_time If nonzero, when resource was locked to node
*
* \note On success, the CIB update's call ID will be stored in
* pending_rsc_update.
*/
void
controld_update_resource_history(const char *node_name,
const lrmd_rsc_info_t *rsc,
lrmd_event_data_t *op, time_t lock_time)
{
xmlNode *update = NULL;
xmlNode *xml = NULL;
int call_opt = crmd_cib_smart_opt();
const char *node_id = NULL;
const char *container = NULL;
CRM_CHECK((node_name != NULL) && (op != NULL), return);
if (rsc == NULL) {
crm_warn("Resource %s no longer exists in the executor", op->rsc_id);
controld_ack_event_directly(NULL, NULL, rsc, op, op->rsc_id);
return;
}
// <status>
update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
// <node_state ...>
xml = pcmk__xe_create(update, PCMK__XE_NODE_STATE);
if (controld_is_local_node(node_name)) {
node_id = controld_globals.our_uuid;
} else {
node_id = node_name;
pcmk__xe_set_bool_attr(xml, PCMK_XA_REMOTE_NODE, true);
}
crm_xml_add(xml, PCMK_XA_ID, node_id);
crm_xml_add(xml, PCMK_XA_UNAME, node_name);
crm_xml_add(xml, PCMK_XA_CRM_DEBUG_ORIGIN, __func__);
// <lrm ...>
xml = pcmk__xe_create(xml, PCMK__XE_LRM);
crm_xml_add(xml, PCMK_XA_ID, node_id);
// <lrm_resources>
xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCES);
// <lrm_resource ...>
xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCE);
crm_xml_add(xml, PCMK_XA_ID, op->rsc_id);
crm_xml_add(xml, PCMK_XA_CLASS, rsc->standard);
crm_xml_add(xml, PCMK_XA_PROVIDER, rsc->provider);
crm_xml_add(xml, PCMK_XA_TYPE, rsc->type);
if (lock_time != 0) {
/* Actions on a locked resource should either preserve the lock by
* recording it with the action result, or clear it.
*/
if (!should_preserve_lock(op)) {
lock_time = 0;
}
crm_xml_add_ll(xml, PCMK_OPT_SHUTDOWN_LOCK, (long long) lock_time);
}
if (op->params != NULL) {
container = g_hash_table_lookup(op->params,
CRM_META "_" PCMK__META_CONTAINER);
if (container != NULL) {
crm_trace("Resource %s is a part of container resource %s",
op->rsc_id, container);
crm_xml_add(xml, PCMK__META_CONTAINER, container);
}
}
// <lrm_resource_op ...> (possibly more than one)
controld_add_resource_history_xml(xml, rsc, op, node_name);
/* Update CIB asynchronously. Even if it fails, the resource state should be
 * discovered during the next election. Worst case, the node is wrongly
 * fenced for running a resource it isn't actually running.
 */
crm_log_xml_trace(update, __func__);
controld_update_cib(PCMK_XE_STATUS, update, call_opt, cib_rsc_callback);
pcmk__xml_free(update);
}
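/* Rough shape of the status update built above (element names assume the
 * usual token values; most attributes omitted):
 *   <status>
 *     <node_state id="..." uname="node1">
 *       <lrm id="...">
 *         <lrm_resources>
 *           <lrm_resource id="my-rsc" class="..." provider="..." type="...">
 *             <lrm_rsc_op .../>
 *           </lrm_resource>
 *         </lrm_resources>
 *       </lrm>
 *     </node_state>
 *   </status>
 */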
/*!
* \internal
* \brief Erase an LRM history entry from the CIB, given the operation data
*
* \param[in] op Operation whose history should be deleted
*/
void
controld_delete_action_history(const lrmd_event_data_t *op)
{
xmlNode *xml_top = NULL;
CRM_CHECK(op != NULL, return);
xml_top = pcmk__xe_create(NULL, PCMK__XE_LRM_RSC_OP);
crm_xml_add_int(xml_top, PCMK__XA_CALL_ID, op->call_id);
crm_xml_add(xml_top, PCMK__XA_TRANSITION_KEY, op->user_data);
if (op->interval_ms > 0) {
char *op_id = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms);
/* Avoid deleting last_failure too (if it was a result of this recurring op failing) */
crm_xml_add(xml_top, PCMK_XA_ID, op_id);
free(op_id);
}
crm_debug("Erasing resource operation history for " PCMK__OP_FMT " (call=%d)",
op->rsc_id, op->op_type, op->interval_ms, op->call_id);
controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn,
PCMK_XE_STATUS, xml_top, cib_none);
crm_log_xml_trace(xml_top, "op:cancel");
pcmk__xml_free(xml_top);
}
/* Define xpath to find LRM resource history entry by node and resource */
#define XPATH_HISTORY \
"/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" \
"/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES \
"/" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" \
"/" PCMK__XE_LRM_RSC_OP
/* ... and also by operation key */
#define XPATH_HISTORY_ID XPATH_HISTORY "[@" PCMK_XA_ID "='%s']"
/* ... and also by operation key and operation call ID */
#define XPATH_HISTORY_CALL XPATH_HISTORY \
"[@" PCMK_XA_ID "='%s' and @" PCMK__XA_CALL_ID "='%d']"
/* ... and also by operation key and original operation key */
#define XPATH_HISTORY_ORIG XPATH_HISTORY \
"[@" PCMK_XA_ID "='%s' and @" PCMK__XA_OPERATION_KEY "='%s']"
/*!
* \internal
* \brief Delete a last_failure resource history entry from the CIB
*
* \param[in] rsc_id Name of resource to clear history for
* \param[in] node Name of node to clear history for
* \param[in] action If specified, delete only if this was failed action
* \param[in] interval_ms If \p action is specified, it has this interval
*/
void
controld_cib_delete_last_failure(const char *rsc_id, const char *node,
const char *action, guint interval_ms)
{
char *xpath = NULL;
char *last_failure_key = NULL;
CRM_CHECK((rsc_id != NULL) && (node != NULL), return);
// Generate XPath to match desired entry
last_failure_key = pcmk__op_key(rsc_id, "last_failure", 0);
if (action == NULL) {
xpath = crm_strdup_printf(XPATH_HISTORY_ID, node, rsc_id,
last_failure_key);
} else {
char *action_key = pcmk__op_key(rsc_id, action, interval_ms);
xpath = crm_strdup_printf(XPATH_HISTORY_ORIG, node, rsc_id,
last_failure_key, action_key);
free(action_key);
}
free(last_failure_key);
controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath,
NULL, cib_xpath);
free(xpath);
}
/*!
* \internal
* \brief Delete resource history entry from the CIB, given operation key
*
* \param[in] rsc_id Name of resource to clear history for
* \param[in] node Name of node to clear history for
* \param[in] key Operation key of operation to clear history for
* \param[in] call_id If specified, delete entry only if it has this call ID
*/
void
controld_delete_action_history_by_key(const char *rsc_id, const char *node,
const char *key, int call_id)
{
char *xpath = NULL;
CRM_CHECK((rsc_id != NULL) && (node != NULL) && (key != NULL), return);
if (call_id > 0) {
xpath = crm_strdup_printf(XPATH_HISTORY_CALL, node, rsc_id, key,
call_id);
} else {
xpath = crm_strdup_printf(XPATH_HISTORY_ID, node, rsc_id, key);
}
controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath,
NULL, cib_xpath);
free(xpath);
}
diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c
index ebbdea8660..68c9a6df59 100644
--- a/daemons/fenced/fenced_commands.c
+++ b/daemons/fenced/fenced_commands.c
@@ -1,3651 +1,3651 @@
/*
* Copyright 2009-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <stdbool.h> // bool
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <ctype.h>
#include <libxml/tree.h> // xmlNode
#include <libxml/xpath.h> // xmlXPathObject, etc.
#include <crm/crm.h>
#include <crm/common/ipc.h>
#include <crm/common/ipc_internal.h>
#include <crm/cluster/internal.h>
#include <crm/common/mainloop.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
#include <crm/common/xml.h>
#include <pacemaker-fenced.h>
static GHashTable *device_table = NULL;
GHashTable *topology = NULL;
static GList *cmd_list = NULL;
static GHashTable *fenced_handlers = NULL;
struct device_search_s {
/* target of fence action */
char *host;
/* requested fence action */
char *action;
/* timeout to use if a device is queried dynamically for possible targets */
// @TODO This name is misleading now; it's the value of stonith-timeout
int per_device_timeout;
/* number of registered fencing devices at time of request */
int replies_needed;
/* number of device replies received so far */
int replies_received;
/* whether the target is eligible to perform requested action (or off) */
bool allow_self;
/* private data to pass to search callback function */
void *user_data;
/* function to call when all replies have been received */
void (*callback) (GList * devices, void *user_data);
/* devices capable of performing requested action (or off if remapping) */
GList *capable;
/* Whether to perform searches that support the action */
uint32_t support_action_only;
};
static gboolean stonith_device_dispatch(gpointer user_data);
static void st_child_done(int pid, const pcmk__action_result_t *result,
void *user_data);
static void search_devices_record_result(struct device_search_s *search, const char *device,
gboolean can_fence);
static int get_agent_metadata(const char *agent, xmlNode **metadata);
static void read_action_metadata(fenced_device_t *device);
static enum fenced_target_by unpack_level_kind(const xmlNode *level);
typedef struct {
int id;
uint32_t options;
int default_timeout; /* seconds */
int timeout; /* seconds */
int start_delay; // seconds (-1 means disable static/random fencing delays)
int delay_id;
char *op;
char *origin;
char *client;
char *client_name;
char *remote_op_id;
char *target;
char *action;
char *device;
//! Head of device list (used only for freeing list with command object)
GList *device_list;
//! Next item to process in \c device_list
GList *next_device_iter;
void *internal_user_data;
void (*done_cb) (int pid, const pcmk__action_result_t *result,
void *user_data);
fenced_device_t *active_on;
fenced_device_t *activating_on;
} async_command_t;
static xmlNode *construct_async_reply(const async_command_t *cmd,
const pcmk__action_result_t *result);
/*!
* \internal
* \brief Set a bad fencer API request error in a result object
*
* \param[out] result Result to set
*/
static inline void
set_bad_request_result(pcmk__action_result_t *result)
{
pcmk__set_result(result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID,
"Fencer API request missing required information (bug?)");
}
/*!
* \internal
* \brief Check whether the fencer's device table contains a watchdog device
*
* \retval \c true If the device table contains a watchdog device
* \retval \c false Otherwise
*/
bool
fenced_has_watchdog_device(void)
{
return (device_table != NULL)
&& (g_hash_table_lookup(device_table, STONITH_WATCHDOG_ID) != NULL);
}
/*!
* \internal
* \brief Call a function for each known fence device
*
* \param[in] fn Function to call for each device
* \param[in,out] user_data User data
*/
void
fenced_foreach_device(GHFunc fn, gpointer user_data)
{
if (device_table != NULL) {
g_hash_table_foreach(device_table, fn, user_data);
}
}
/*!
* \internal
* \brief Remove each known fence device matching a given predicate
*
* \param[in] fn Function that returns \c TRUE to remove a fence device or
* \c FALSE to keep it
*/
void
fenced_foreach_device_remove(GHRFunc fn)
{
if (device_table != NULL) {
g_hash_table_foreach_remove(device_table, fn, NULL);
}
}
static gboolean
is_action_required(const char *action, const fenced_device_t *device)
{
return (device != NULL)
&& pcmk_is_set(device->flags, fenced_df_auto_unfence)
&& pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none);
}
static int
get_action_delay_max(const fenced_device_t *device, const char *action)
{
const char *value = NULL;
guint delay_max = 0U;
if (!pcmk__is_fencing_action(action)) {
return 0;
}
value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_MAX);
if (value) {
pcmk_parse_interval_spec(value, &delay_max);
delay_max /= 1000;
}
return (int) delay_max;
}
static int
get_action_delay_base(const fenced_device_t *device, const char *action,
const char *target)
{
char *hash_value = NULL;
guint delay_base = 0U;
if (!pcmk__is_fencing_action(action)) {
return 0;
}
hash_value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_BASE);
if (hash_value) {
char *value = pcmk__str_copy(hash_value);
char *valptr = value;
if (target != NULL) {
for (char *val = strtok(value, "; \t"); val != NULL; val = strtok(NULL, "; \t")) {
char *mapval = strchr(val, ':');
if (mapval == NULL || mapval[1] == 0) {
crm_err("pcmk_delay_base: empty value in mapping", val);
continue;
}
if (mapval != val && strncasecmp(target, val, (size_t)(mapval - val)) == 0) {
value = mapval + 1;
crm_debug("pcmk_delay_base mapped to %s for %s",
value, target);
break;
}
}
}
if (strchr(value, ':') == 0) {
pcmk_parse_interval_spec(value, &delay_base);
delay_base /= 1000;
}
free(valptr);
}
return (int) delay_base;
}
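/* Illustration (hypothetical configuration): with
 *   pcmk_delay_base="node1:2s;node2:5s"
 * and target "node1", the mapping loop above picks "2s", so the base delay is
 * 2 seconds; a plain value such as "5s" (no ':') applies to every target.
 */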
/*!
* \internal
* \brief Override STONITH timeout with pcmk_*_timeout if available
*
* \param[in] device STONITH device to use
* \param[in] action STONITH action name
* \param[in] default_timeout Timeout to use if device does not have
* a pcmk_*_timeout parameter for action
*
* \return Value of pcmk_(action)_timeout if available, otherwise default_timeout
* \note For consistency, it would be nice if reboot/off/on timeouts could be
* set the same way as start/stop/monitor timeouts, i.e. with an
* <operation> entry in the fencing resource configuration. However that
* is insufficient because fencing devices may be registered directly via
* the fencer's register_device() API instead of going through the CIB
* (e.g. stonith_admin uses it for its -R option, and the executor uses it
* to ensure a device is registered when a command is issued). As device
* properties, pcmk_*_timeout parameters can be grabbed by the fencer when
* the device is registered, whether by CIB change or API call.
*/
static int
get_action_timeout(const fenced_device_t *device, const char *action,
int default_timeout)
{
if (action && device && device->params) {
char buffer[64] = { 0, };
const char *value = NULL;
/* If "reboot" was requested but the device does not support it,
* we will remap to "off", so check timeout for "off" instead
*/
if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)
&& !pcmk_is_set(device->flags, fenced_df_supports_reboot)) {
crm_trace("%s doesn't support reboot, using timeout for off instead",
device->id);
action = PCMK_ACTION_OFF;
}
/* If the device config specified an action-specific timeout, use it */
snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action);
value = g_hash_table_lookup(device->params, buffer);
if (value) {
long long timeout_ms = crm_get_msec(value);
return (int) QB_MIN(pcmk__timeout_ms2s(timeout_ms), INT_MAX);
}
}
return default_timeout;
}
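/* Illustration (hypothetical configuration): a device parameter
 *   pcmk_reboot_timeout="120s"
 * makes a reboot request use 120 seconds here instead of default_timeout,
 * because the lookup key built above is "pcmk_<action>_timeout".
 */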
/*!
* \internal
* \brief Get the currently executing device for a fencing operation
*
* \param[in] cmd Fencing operation to check
*
* \return Currently executing device for \p cmd if any, otherwise NULL
*/
static fenced_device_t *
cmd_device(const async_command_t *cmd)
{
if ((cmd == NULL) || (cmd->device == NULL) || (device_table == NULL)) {
return NULL;
}
return g_hash_table_lookup(device_table, cmd->device);
}
/*!
* \internal
* \brief Return the configured reboot action for a given device
*
* \param[in] device_id Device ID
*
* \return Configured reboot action for \p device_id
*/
const char *
fenced_device_reboot_action(const char *device_id)
{
const char *action = NULL;
if ((device_table != NULL) && (device_id != NULL)) {
fenced_device_t *device = g_hash_table_lookup(device_table, device_id);
if ((device != NULL) && (device->params != NULL)) {
action = g_hash_table_lookup(device->params, "pcmk_reboot_action");
}
}
return pcmk__s(action, PCMK_ACTION_REBOOT);
}
/*!
* \internal
* \brief Check whether a given device supports the "on" action
*
* \param[in] device_id Device ID
*
* \return true if \p device_id supports "on", otherwise false
*/
bool
fenced_device_supports_on(const char *device_id)
{
if ((device_table != NULL) && (device_id != NULL)) {
fenced_device_t *device = g_hash_table_lookup(device_table, device_id);
if (device != NULL) {
return pcmk_is_set(device->flags, fenced_df_supports_on);
}
}
return false;
}
static void
free_async_command(async_command_t * cmd)
{
if (!cmd) {
return;
}
if (cmd->delay_id) {
g_source_remove(cmd->delay_id);
}
cmd_list = g_list_remove(cmd_list, cmd);
g_list_free_full(cmd->device_list, free);
free(cmd->device);
free(cmd->action);
free(cmd->target);
free(cmd->remote_op_id);
free(cmd->client);
free(cmd->client_name);
free(cmd->origin);
free(cmd->op);
free(cmd);
}
/*!
* \internal
* \brief Create a new asynchronous fencing operation from request XML
*
* \param[in] msg Fencing request XML (from IPC or CPG)
*
* \return Newly allocated fencing operation on success, otherwise NULL
*
* \note This asserts on memory errors, so a NULL return indicates an
* unparseable message.
*/
static async_command_t *
create_async_command(xmlNode *msg)
{
xmlNode *op = NULL;
async_command_t *cmd = NULL;
int rc = pcmk_rc_ok;
if (msg == NULL) {
return NULL;
}
op = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]",
LOG_ERR);
if (op == NULL) {
return NULL;
}
cmd = pcmk__assert_alloc(1, sizeof(async_command_t));
// All messages must include these
cmd->action = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ACTION);
cmd->op = crm_element_value_copy(msg, PCMK__XA_ST_OP);
cmd->client = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTID);
if ((cmd->action == NULL) || (cmd->op == NULL) || (cmd->client == NULL)) {
free_async_command(cmd);
return NULL;
}
crm_element_value_int(msg, PCMK__XA_ST_CALLID, &(cmd->id));
crm_element_value_int(msg, PCMK__XA_ST_DELAY, &(cmd->start_delay));
crm_element_value_int(msg, PCMK__XA_ST_TIMEOUT, &(cmd->default_timeout));
cmd->timeout = cmd->default_timeout;
rc = pcmk__xe_get_flags(msg, PCMK__XA_ST_CALLOPT, &(cmd->options),
st_opt_none);
if (rc != pcmk_rc_ok) {
crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc));
}
cmd->origin = crm_element_value_copy(msg, PCMK__XA_SRC);
cmd->remote_op_id = crm_element_value_copy(msg, PCMK__XA_ST_REMOTE_OP);
cmd->client_name = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTNAME);
cmd->target = crm_element_value_copy(op, PCMK__XA_ST_TARGET);
cmd->device = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ID);
cmd->done_cb = st_child_done;
// Track in global command list
cmd_list = g_list_append(cmd_list, cmd);
return cmd;
}
static int
get_action_limit(fenced_device_t *device)
{
const char *value = NULL;
int action_limit = 1;
value = g_hash_table_lookup(device->params, PCMK_STONITH_ACTION_LIMIT);
if ((value == NULL)
|| (pcmk__scan_min_int(value, &action_limit, INT_MIN) != pcmk_rc_ok)
|| (action_limit == 0)) {
action_limit = 1;
}
return action_limit;
}
static int
get_active_cmds(fenced_device_t *device)
{
int counter = 0;
GList *gIter = NULL;
GList *gIterNext = NULL;
CRM_CHECK(device != NULL, return 0);
for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
async_command_t *cmd = gIter->data;
gIterNext = gIter->next;
if (cmd->active_on == device) {
counter++;
}
}
return counter;
}
static void
fork_cb(int pid, void *user_data)
{
async_command_t *cmd = (async_command_t *) user_data;
fenced_device_t *device = cmd->activating_on;
if (device == NULL) {
/* In case of a retry, we've done the move from activating_on to
* active_on already
*/
device = cmd->active_on;
}
pcmk__assert(device != NULL);
crm_debug("Operation '%s' [%d]%s%s using %s now running with %ds timeout",
cmd->action, pid,
((cmd->target == NULL)? "" : " targeting "),
pcmk__s(cmd->target, ""), device->id, cmd->timeout);
cmd->active_on = device;
cmd->activating_on = NULL;
}
static int
get_agent_metadata_cb(gpointer data) {
fenced_device_t *device = data;
guint period_ms;
switch (get_agent_metadata(device->agent, &device->agent_metadata)) {
case pcmk_rc_ok:
if (device->agent_metadata) {
read_action_metadata(device);
device->default_host_arg =
stonith__default_host_arg(device->agent_metadata);
}
return G_SOURCE_REMOVE;
case EAGAIN:
period_ms = pcmk__mainloop_timer_get_period(device->timer);
if (period_ms < 160 * 1000) {
mainloop_timer_set_period(device->timer, 2 * period_ms);
}
return G_SOURCE_CONTINUE;
default:
return G_SOURCE_REMOVE;
}
}
/*!
* \internal
* \brief Call a command's action callback for an internal (not library) result
*
* \param[in,out] cmd Command to report result for
* \param[in] execution_status Execution status to use for result
* \param[in] exit_status Exit status to use for result
* \param[in] exit_reason Exit reason to use for result
*/
static void
report_internal_result(async_command_t *cmd, int exit_status,
int execution_status, const char *exit_reason)
{
pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
pcmk__set_result(&result, exit_status, execution_status, exit_reason);
cmd->done_cb(0, &result, cmd);
pcmk__reset_result(&result);
}
static gboolean
stonith_device_execute(fenced_device_t *device)
{
int exec_rc = 0;
const char *action_str = NULL;
async_command_t *cmd = NULL;
stonith_action_t *action = NULL;
int active_cmds = 0;
int action_limit = 0;
GList *gIter = NULL;
GList *gIterNext = NULL;
CRM_CHECK(device != NULL, return FALSE);
active_cmds = get_active_cmds(device);
action_limit = get_action_limit(device);
if (action_limit > -1 && active_cmds >= action_limit) {
crm_trace("%s is over its action limit of %d (%u active action%s)",
device->id, action_limit, active_cmds,
pcmk__plural_s(active_cmds));
return TRUE;
}
for (gIter = device->pending_ops; gIter != NULL; gIter = gIterNext) {
async_command_t *pending_op = gIter->data;
gIterNext = gIter->next;
if (pending_op && pending_op->delay_id) {
crm_trace("Operation '%s'%s%s using %s was asked to run too early, "
"waiting for start delay of %ds",
pending_op->action,
((pending_op->target == NULL)? "" : " targeting "),
pcmk__s(pending_op->target, ""),
device->id, pending_op->start_delay);
continue;
}
device->pending_ops = g_list_remove_link(device->pending_ops, gIter);
g_list_free_1(gIter);
cmd = pending_op;
break;
}
if (cmd == NULL) {
crm_trace("No actions using %s are needed", device->id);
return TRUE;
}
if (pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT,
STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) {
if (pcmk__is_fencing_action(cmd->action)) {
if (node_does_watchdog_fencing(fenced_get_local_node())) {
pcmk__panic("Watchdog self-fencing required");
goto done;
}
} else {
crm_info("Faking success for %s watchdog operation", cmd->action);
report_internal_result(cmd, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
goto done;
}
}
#if PCMK__ENABLE_CIBSECRETS
exec_rc = pcmk__substitute_secrets(device->id, device->params);
if (exec_rc != pcmk_rc_ok) {
if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) {
crm_info("Proceeding with stop operation for %s "
"despite being unable to load CIB secrets (%s)",
device->id, pcmk_rc_str(exec_rc));
} else {
crm_err("Considering %s unconfigured "
"because unable to load CIB secrets: %s",
device->id, pcmk_rc_str(exec_rc));
report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_SECRETS,
"Failed to get CIB secrets");
goto done;
}
}
#endif
action_str = cmd->action;
if (pcmk__str_eq(cmd->action, PCMK_ACTION_REBOOT, pcmk__str_none)
&& !pcmk_is_set(device->flags, fenced_df_supports_reboot)) {
crm_notice("Remapping 'reboot' action%s%s using %s to 'off' "
"because agent '%s' does not support reboot",
((cmd->target == NULL)? "" : " targeting "),
pcmk__s(cmd->target, ""), device->id, device->agent);
action_str = PCMK_ACTION_OFF;
}
action = stonith__action_create(device->agent, action_str, cmd->target,
cmd->timeout, device->params,
device->aliases, device->default_host_arg);
/* For async execution, exec_rc is negative only on an early error exit;
 * otherwise, success and errors are handled via callbacks.
 */
cmd->activating_on = device;
exec_rc = stonith__execute_async(action, (void *)cmd, cmd->done_cb,
fork_cb);
if (exec_rc < 0) {
cmd->activating_on = NULL;
cmd->done_cb(0, stonith__action_result(action), cmd);
stonith__destroy_action(action);
}
done:
/* Device might get triggered to work by multiple fencing commands
* simultaneously. Trigger the device again to make sure any
* remaining concurrent commands get executed. */
if (device->pending_ops) {
mainloop_set_trigger(device->work);
}
return TRUE;
}
static gboolean
stonith_device_dispatch(gpointer user_data)
{
return stonith_device_execute(user_data);
}
static gboolean
start_delay_helper(gpointer data)
{
async_command_t *cmd = data;
fenced_device_t *device = cmd_device(cmd);
cmd->delay_id = 0;
if (device) {
mainloop_set_trigger(device->work);
}
return FALSE;
}
static void
schedule_stonith_command(async_command_t *cmd, fenced_device_t *device)
{
int delay_max = 0;
int delay_base = 0;
int requested_delay = cmd->start_delay;
CRM_CHECK(cmd != NULL, return);
CRM_CHECK(device != NULL, return);
if (cmd->device) {
free(cmd->device);
}
cmd->device = pcmk__str_copy(device->id);
cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout);
if (cmd->remote_op_id) {
crm_debug("Scheduling '%s' action%s%s using %s for remote peer %s "
"with op id %.8s and timeout %ds",
cmd->action,
(cmd->target == NULL)? "" : " targeting ",
pcmk__s(cmd->target, ""),
device->id, cmd->origin, cmd->remote_op_id, cmd->timeout);
} else {
crm_debug("Scheduling '%s' action%s%s using %s for %s with timeout %ds",
cmd->action,
(cmd->target == NULL)? "" : " targeting ",
pcmk__s(cmd->target, ""),
device->id, cmd->client, cmd->timeout);
}
device->pending_ops = g_list_append(device->pending_ops, cmd);
mainloop_set_trigger(device->work);
// Value -1 means disable any static/random fencing delays
if (requested_delay < 0) {
return;
}
delay_max = get_action_delay_max(device, cmd->action);
delay_base = get_action_delay_base(device, cmd->action, cmd->target);
if (delay_max == 0) {
delay_max = delay_base;
}
if (delay_max < delay_base) {
crm_warn(PCMK_STONITH_DELAY_BASE " (%ds) is larger than "
PCMK_STONITH_DELAY_MAX " (%ds) for %s using %s "
"(limiting to maximum delay)",
delay_base, delay_max, cmd->action, device->id);
delay_base = delay_max;
}
if (delay_max > 0) {
cmd->start_delay +=
// coverity[dont_call] It doesn't matter here if rand() is predictable
((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0)
+ delay_base;
}
if (cmd->start_delay > 0) {
crm_notice("Delaying '%s' action%s%s using %s for %ds " QB_XS
" timeout=%ds requested_delay=%ds base=%ds max=%ds",
cmd->action,
(cmd->target == NULL)? "" : " targeting ",
pcmk__s(cmd->target, ""),
device->id, cmd->start_delay, cmd->timeout,
requested_delay, delay_base, delay_max);
cmd->delay_id =
pcmk__create_timer(cmd->start_delay * 1000, start_delay_helper, cmd);
}
}
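/* Worked example of the delay math above (illustration only): with
 * pcmk_delay_base="1s" and pcmk_delay_max="5s", delay_base is 1 and delay_max
 * is 5, so start_delay gains delay_base + (rand() % 4) seconds, i.e. a random
 * 1-4 seconds on top of any delay requested by the caller.
 */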
static void
free_device(gpointer data)
{
GList *gIter = NULL;
fenced_device_t *device = data;
g_hash_table_destroy(device->params);
g_hash_table_destroy(device->aliases);
for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) {
async_command_t *cmd = gIter->data;
crm_warn("Removal of device '%s' purged operation '%s'", device->id, cmd->action);
report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"Device was removed before action could be executed");
}
g_list_free(device->pending_ops);
g_list_free_full(device->targets, free);
if (device->timer) {
mainloop_timer_stop(device->timer);
mainloop_timer_del(device->timer);
}
mainloop_destroy_trigger(device->work);
pcmk__xml_free(device->agent_metadata);
free(device->namespace);
if (device->on_target_actions != NULL) {
g_string_free(device->on_target_actions, TRUE);
}
free(device->agent);
free(device->id);
free(device);
}
/*!
* \internal
* \brief Initialize the table of known fence devices
*/
void
fenced_init_device_table(void)
{
if (device_table == NULL) {
device_table = pcmk__strkey_table(NULL, free_device);
}
}
/*!
* \internal
* \brief Free the table of known fence devices
*/
void
fenced_free_device_table(void)
{
if (device_table != NULL) {
g_hash_table_destroy(device_table);
device_table = NULL;
}
}
static GHashTable *
build_port_aliases(const char *hostmap, GList ** targets)
{
char *name = NULL;
int last = 0, lpc = 0, max = 0, added = 0;
GHashTable *aliases = pcmk__strikey_table(free, free);
if (hostmap == NULL) {
return aliases;
}
max = strlen(hostmap);
for (; lpc <= max; lpc++) {
switch (hostmap[lpc]) {
/* Skip escaped chars */
case '\\':
lpc++;
break;
/* Assignment chars */
case '=':
case ':':
if (lpc > last) {
free(name);
name = pcmk__assert_alloc(1, 1 + lpc - last);
memcpy(name, hostmap + last, lpc - last);
}
last = lpc + 1;
break;
/* Delimiter chars */
/* case ',': Potentially used to specify multiple ports */
case 0:
case ';':
case ' ':
case '\t':
if (name) {
char *value = NULL;
int k = 0;
value = pcmk__assert_alloc(1, 1 + lpc - last);
memcpy(value, hostmap + last, lpc - last);
for (int i = 0; value[i] != '\0'; i++) {
if (value[i] != '\\') {
value[k++] = value[i];
}
}
value[k] = '\0';
crm_debug("Adding alias '%s'='%s'", name, value);
g_hash_table_replace(aliases, name, value);
if (targets) {
*targets = g_list_append(*targets, pcmk__str_copy(value));
}
value = NULL;
name = NULL;
added++;
} else if (lpc > last) {
crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last);
}
last = lpc + 1;
break;
}
if (hostmap[lpc] == 0) {
break;
}
}
if (added == 0) {
crm_info("No host mappings detected in '%s'", hostmap);
}
free(name);
return aliases;
}
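/* Illustration (hypothetical pcmk_host_map value): parsing
 *   "node1:1;node2:2"
 * with the loop above yields the aliases node1 -> "1" and node2 -> "2" and,
 * when targets is non-NULL, appends "1" and "2" to the target list.
 */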
GHashTable *metadata_cache = NULL;
void
free_metadata_cache(void) {
if (metadata_cache != NULL) {
g_hash_table_destroy(metadata_cache);
metadata_cache = NULL;
}
}
static void
init_metadata_cache(void) {
if (metadata_cache == NULL) {
metadata_cache = pcmk__strkey_table(free, free);
}
}
int
get_agent_metadata(const char *agent, xmlNode ** metadata)
{
char *buffer = NULL;
if (metadata == NULL) {
return EINVAL;
}
*metadata = NULL;
if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT_INTERNAL, pcmk__str_none)) {
return pcmk_rc_ok;
}
init_metadata_cache();
buffer = g_hash_table_lookup(metadata_cache, agent);
if (buffer == NULL) {
stonith_t *st = stonith__api_new();
int rc;
if (st == NULL) {
crm_warn("Could not get agent meta-data: "
"API memory allocation failed");
return EAGAIN;
}
rc = st->cmds->metadata(st, st_opt_sync_call, agent,
NULL, &buffer, 10);
stonith__api_free(st);
if (rc || !buffer) {
crm_err("Could not retrieve metadata for fencing agent %s", agent);
return EAGAIN;
}
g_hash_table_replace(metadata_cache, pcmk__str_copy(agent), buffer);
}
*metadata = pcmk__xml_parse(buffer);
return pcmk_rc_ok;
}
static void
read_action_metadata(fenced_device_t *device)
{
xmlXPathObject *xpath = NULL;
int max = 0;
int lpc = 0;
if (device->agent_metadata == NULL) {
return;
}
xpath = pcmk__xpath_search(device->agent_metadata->doc,
"//" PCMK_XE_ACTION);
max = pcmk__xpath_num_results(xpath);
if (max == 0) {
xmlXPathFreeObject(xpath);
return;
}
for (lpc = 0; lpc < max; lpc++) {
const char *action = NULL;
xmlNode *match = pcmk__xpath_result(xpath, lpc);
CRM_LOG_ASSERT(match != NULL);
if(match == NULL) { continue; };
action = crm_element_value(match, PCMK_XA_NAME);
if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) {
fenced_device_set_flags(device, fenced_df_supports_list);
} else if (pcmk__str_eq(action, PCMK_ACTION_STATUS, pcmk__str_none)) {
fenced_device_set_flags(device, fenced_df_supports_status);
} else if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
fenced_device_set_flags(device, fenced_df_supports_reboot);
} else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
/* PCMK_XA_AUTOMATIC means the cluster will unfence a node when it
* joins.
*
* @COMPAT PCMK__XA_REQUIRED is a deprecated synonym for
* PCMK_XA_AUTOMATIC.
*/
if (pcmk__xe_attr_is_true(match, PCMK_XA_AUTOMATIC)
|| pcmk__xe_attr_is_true(match, PCMK__XA_REQUIRED)) {
fenced_device_set_flags(device, fenced_df_auto_unfence);
}
fenced_device_set_flags(device, fenced_df_supports_on);
}
if ((action != NULL)
&& pcmk__xe_attr_is_true(match, PCMK_XA_ON_TARGET)) {
pcmk__add_word(&(device->on_target_actions), 64, action);
}
}
xmlXPathFreeObject(xpath);
}
static const char *
target_list_type(fenced_device_t *dev)
{
const char *check_type = NULL;
check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK);
if (check_type == NULL) {
if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) {
check_type = PCMK_VALUE_STATIC_LIST;
} else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) {
check_type = PCMK_VALUE_STATIC_LIST;
} else if (pcmk_is_set(dev->flags, fenced_df_supports_list)) {
check_type = PCMK_VALUE_DYNAMIC_LIST;
} else if (pcmk_is_set(dev->flags, fenced_df_supports_status)) {
check_type = PCMK_VALUE_STATUS;
} else {
check_type = PCMK_VALUE_NONE;
}
}
return check_type;
}
static fenced_device_t *
build_device_from_xml(const xmlNode *dev)
{
const char *value;
fenced_device_t *device = NULL;
char *agent = crm_element_value_copy(dev, PCMK_XA_AGENT);
CRM_CHECK(agent != NULL, return device);
device = pcmk__assert_alloc(1, sizeof(fenced_device_t));
device->id = crm_element_value_copy(dev, PCMK_XA_ID);
device->agent = agent;
device->namespace = crm_element_value_copy(dev, PCMK__XA_NAMESPACE);
device->params = xml2list(dev);
value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_LIST);
if (value) {
device->targets = stonith__parse_targets(value);
}
value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP);
device->aliases = build_port_aliases(value, &(device->targets));
value = target_list_type(device);
if (!pcmk__str_eq(value, PCMK_VALUE_STATIC_LIST, pcmk__str_casei)
&& (device->targets != NULL)) {
// device->targets is necessary only with PCMK_VALUE_STATIC_LIST
g_list_free_full(device->targets, free);
device->targets = NULL;
}
switch (get_agent_metadata(device->agent, &device->agent_metadata)) {
case pcmk_rc_ok:
if (device->agent_metadata) {
read_action_metadata(device);
device->default_host_arg =
stonith__default_host_arg(device->agent_metadata);
}
break;
case EAGAIN:
if (device->timer == NULL) {
device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000,
TRUE, get_agent_metadata_cb, device);
}
if (!mainloop_timer_running(device->timer)) {
mainloop_timer_start(device->timer);
}
break;
default:
break;
}
value = crm_element_value(dev, PCMK__XA_RSC_PROVIDES);
if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) {
fenced_device_set_flags(device, fenced_df_auto_unfence);
}
if (is_action_required(PCMK_ACTION_ON, device)) {
crm_info("Fencing device '%s' requires unfencing", device->id);
}
if (device->on_target_actions != NULL) {
crm_info("Fencing device '%s' requires actions (%s) to be executed "
"on target", device->id,
(const char *) device->on_target_actions->str);
}
device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
return device;
}
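/*!
 * \internal
 * \brief Schedule an agent command generated internally by the fencer
 *
 * Build an asynchronous command (for example, the "list" and "status" checks
 * used during device searches) and queue it for execution on a device.
 *
 * \param[in]     origin              Name of calling function (for logs)
 * \param[in,out] device              Device to execute the command on
 * \param[in]     action              Agent action to execute
 * \param[in]     target              Fence target, or NULL if not applicable
 * \param[in]     timeout             Timeout in seconds (0 to use a 60-second
 *                                    default)
 * \param[in]     internal_user_data  Data to make available to \p done_cb
 * \param[in]     done_cb             Function to call when the command
 *                                    completes (receives the command itself as
 *                                    its user data)
 */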
static void
schedule_internal_command(const char *origin, fenced_device_t *device,
const char *action, const char *target, int timeout,
void *internal_user_data,
void (*done_cb) (int pid,
const pcmk__action_result_t *result,
void *user_data))
{
async_command_t *cmd = NULL;
cmd = pcmk__assert_alloc(1, sizeof(async_command_t));
cmd->id = -1;
cmd->default_timeout = timeout ? timeout : 60;
cmd->timeout = cmd->default_timeout;
cmd->action = pcmk__str_copy(action);
cmd->target = pcmk__str_copy(target);
cmd->device = pcmk__str_copy(device->id);
cmd->origin = pcmk__str_copy(origin);
cmd->client = pcmk__str_copy(crm_system_name);
cmd->client_name = pcmk__str_copy(crm_system_name);
cmd->internal_user_data = internal_user_data;
cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */
schedule_stonith_command(cmd, device);
}
// Fence agent status commands use custom exit status codes
enum fence_status_code {
fence_status_invalid = -1,
fence_status_active = 0,
fence_status_unknown = 1,
fence_status_inactive = 2,
};
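/*!
 * \internal
 * \brief Callback for a "status" command run during a device search
 *
 * Interpret the agent's status result to decide whether the device can fence
 * the search target, then record the result in the search.
 */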
static void
status_search_cb(int pid, const pcmk__action_result_t *result, void *user_data)
{
async_command_t *cmd = user_data;
struct device_search_s *search = cmd->internal_user_data;
fenced_device_t *dev = cmd_device(cmd);
gboolean can = FALSE;
free_async_command(cmd);
if (!dev) {
search_devices_record_result(search, NULL, FALSE);
return;
}
mainloop_set_trigger(dev->work);
if (result->execution_status != PCMK_EXEC_DONE) {
crm_warn("Assuming %s cannot fence %s "
"because status could not be executed: %s%s%s%s",
dev->id, search->host,
pcmk_exec_status_str(result->execution_status),
((result->exit_reason == NULL)? "" : " ("),
((result->exit_reason == NULL)? "" : result->exit_reason),
((result->exit_reason == NULL)? "" : ")"));
search_devices_record_result(search, dev->id, FALSE);
return;
}
switch (result->exit_status) {
case fence_status_unknown:
crm_trace("%s reported it cannot fence %s", dev->id, search->host);
break;
case fence_status_active:
case fence_status_inactive:
crm_trace("%s reported it can fence %s", dev->id, search->host);
can = TRUE;
break;
default:
crm_warn("Assuming %s cannot fence %s "
"(status returned unknown code %d)",
dev->id, search->host, result->exit_status);
break;
}
search_devices_record_result(search, dev->id, can);
}
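/*!
 * \internal
 * \brief Callback for a "list" command run during a device search
 *
 * On success, refresh the device's cached target list. On failure, reuse the
 * previous list if any, or else assume the device cannot fence the target
 * (switching future checks to "status" if pcmk_host_check was not explicitly
 * set). Then record whether the search target or its alias appears in the
 * list.
 */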
static void
dynamic_list_search_cb(int pid, const pcmk__action_result_t *result,
void *user_data)
{
async_command_t *cmd = user_data;
struct device_search_s *search = cmd->internal_user_data;
fenced_device_t *dev = cmd_device(cmd);
gboolean can_fence = FALSE;
free_async_command(cmd);
    /* The host or its alias must appear in the list output for it to be
     * eligible to be fenced with this device.
     *
     * This will cause problems if downed nodes aren't listed, or (for virtual
     * nodes) if the guest is still listed even though it has been moved to
     * another machine.
     */
if (!dev) {
search_devices_record_result(search, NULL, FALSE);
return;
}
mainloop_set_trigger(dev->work);
if (pcmk__result_ok(result)) {
crm_info("Refreshing target list for %s", dev->id);
g_list_free_full(dev->targets, free);
dev->targets = stonith__parse_targets(result->action_stdout);
dev->targets_age = time(NULL);
} else if (dev->targets != NULL) {
if (result->execution_status == PCMK_EXEC_DONE) {
crm_info("Reusing most recent target list for %s "
"because list returned error code %d",
dev->id, result->exit_status);
} else {
crm_info("Reusing most recent target list for %s "
"because list could not be executed: %s%s%s%s",
dev->id, pcmk_exec_status_str(result->execution_status),
((result->exit_reason == NULL)? "" : " ("),
((result->exit_reason == NULL)? "" : result->exit_reason),
((result->exit_reason == NULL)? "" : ")"));
}
} else { // We have never successfully executed list
if (result->execution_status == PCMK_EXEC_DONE) {
crm_warn("Assuming %s cannot fence %s "
"because list returned error code %d",
dev->id, search->host, result->exit_status);
} else {
crm_warn("Assuming %s cannot fence %s "
"because list could not be executed: %s%s%s%s",
dev->id, search->host,
pcmk_exec_status_str(result->execution_status),
((result->exit_reason == NULL)? "" : " ("),
((result->exit_reason == NULL)? "" : result->exit_reason),
((result->exit_reason == NULL)? "" : ")"));
}
/* Fall back to pcmk_host_check=PCMK_VALUE_STATUS if the user didn't
* explicitly specify PCMK_VALUE_DYNAMIC_LIST
*/
if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK) == NULL) {
crm_notice("Switching to pcmk_host_check='status' for %s", dev->id);
pcmk__insert_dup(dev->params, PCMK_STONITH_HOST_CHECK,
PCMK_VALUE_STATUS);
}
}
if (dev->targets) {
const char *alias = g_hash_table_lookup(dev->aliases, search->host);
if (!alias) {
alias = search->host;
}
if (pcmk__str_in_list(alias, dev->targets, pcmk__str_casei)) {
can_fence = TRUE;
}
}
search_devices_record_result(search, dev->id, can_fence);
}
/*!
 * \internal
 * \brief Check whether one device parameter table differs from another
 *
 * \param[in] first   Parameter table whose keys are checked
 * \param[in] second  Parameter table to compare against
 *
 * \return 1 if any key in \p first (other than meta-attributes and the CRM
 *         feature set) is missing from \p second or has a different value
 *         there, otherwise 0
 */
static int
device_params_diff(GHashTable *first, GHashTable *second) {
char *key = NULL;
char *value = NULL;
GHashTableIter gIter;
g_hash_table_iter_init(&gIter, first);
while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) {
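        /* Ignore Pacemaker-generated meta-attributes and the CRM feature set,
         * since they can differ without making the devices functionally
         * different
         */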
        if (strstr(key, "CRM_meta") == key) {
continue;
} else if (strcmp(key, PCMK_XA_CRM_FEATURE_SET) == 0) {
continue;
} else {
char *other_value = g_hash_table_lookup(second, key);
if (!other_value || !pcmk__str_eq(other_value, value, pcmk__str_casei)) {
crm_trace("Different value for %s: %s != %s", key, other_value, value);
return 1;
}
}
}
return 0;
}
/*!
 * \internal
 * \brief Check whether an identical device already exists in the device table
 *
 * \param[in] device  Device to look for
 *
 * \return Existing device table entry with the same ID, agent, and parameters
 *         as \p device if there is one, otherwise NULL
 */
static fenced_device_t *
device_has_duplicate(const fenced_device_t *device)
{
fenced_device_t *dup = g_hash_table_lookup(device_table, device->id);
if (!dup) {
crm_trace("No match for %s", device->id);
return NULL;
} else if (!pcmk__str_eq(dup->agent, device->agent, pcmk__str_casei)) {
crm_trace("Different agent: %s != %s", dup->agent, device->agent);
return NULL;
}
- // Use pcmk__digest_operation() here?
+ // Find a way to share logic with pcmk__digest_op_params() here?
if (device_params_diff(device->params, dup->params) ||
device_params_diff(dup->params, device->params)) {
return NULL;
}
crm_trace("Match");
return dup;
}
int
fenced_device_register(const xmlNode *dev, bool from_cib)
{
const char *local_node_name = fenced_get_local_node();
fenced_device_t *dup = NULL;
fenced_device_t *device = build_device_from_xml(dev);
int rc = pcmk_rc_ok;
CRM_CHECK(device != NULL, return ENOMEM);
    /* Check whether this is the watchdog device */
if (pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none)
|| pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT,
STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) {
if (stonith_watchdog_timeout_ms <= 0) {
crm_err("Ignoring watchdog fence device without "
PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " set");
rc = ENODEV;
goto done;
}
if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT,
STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) {
crm_err("Ignoring watchdog fence device with unknown agent '%s' "
"rather than '" STONITH_WATCHDOG_AGENT "'",
pcmk__s(device->agent, ""));
rc = ENODEV;
goto done;
}
if (!pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
crm_err("Ignoring watchdog fence device named '%s' rather than "
"'" STONITH_WATCHDOG_ID "'",
pcmk__s(device->id, ""));
rc = ENODEV;
goto done;
}
if (pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT,
pcmk__str_none)) {
/* This has either an empty list or the targets configured for
* watchdog fencing
*/
g_list_free_full(stonith_watchdog_targets, free);
stonith_watchdog_targets = device->targets;
device->targets = NULL;
}
if (!node_does_watchdog_fencing(local_node_name)) {
crm_debug("Skip registration of watchdog fence device on node not "
"in host list");
device->targets = NULL;
stonith_device_remove(device->id, from_cib);
goto done;
}
// Proceed as with any other fencing device
g_list_free_full(device->targets, free);
device->targets = stonith__parse_targets(local_node_name);
pcmk__insert_dup(device->params, PCMK_STONITH_HOST_LIST,
local_node_name);
}
dup = device_has_duplicate(device);
if (dup != NULL) {
guint ndevices = g_hash_table_size(device_table);
crm_debug("Device '%s' already in device list (%d active device%s)",
device->id, ndevices, pcmk__plural_s(ndevices));
free_device(device);
device = dup;
fenced_device_clear_flags(device, fenced_df_dirty);
} else {
guint ndevices = 0;
fenced_device_t *old = g_hash_table_lookup(device_table, device->id);
if (from_cib && (old != NULL)
&& pcmk_is_set(old->flags, fenced_df_api_registered)) {
/* If the CIB is writing over an entry that is shared with a stonith
* client, copy any pending ops that currently exist on the old
* entry to the new one. Otherwise the pending ops will be reported
* as failures.
*/
crm_info("Overwriting existing entry for %s from CIB", device->id);
device->pending_ops = old->pending_ops;
fenced_device_set_flags(device, fenced_df_api_registered);
old->pending_ops = NULL;
if (device->pending_ops != NULL) {
mainloop_set_trigger(device->work);
}
}
g_hash_table_replace(device_table, device->id, device);
ndevices = g_hash_table_size(device_table);
crm_notice("Added '%s' to device list (%d active device%s)",
device->id, ndevices, pcmk__plural_s(ndevices));
}
if (from_cib) {
fenced_device_set_flags(device, fenced_df_cib_registered);
} else {
fenced_device_set_flags(device, fenced_df_api_registered);
}
done:
if (rc != pcmk_rc_ok) {
free_device(device);
}
return rc;
}
void
stonith_device_remove(const char *id, bool from_cib)
{
fenced_device_t *device = g_hash_table_lookup(device_table, id);
guint ndevices = 0;
if (device == NULL) {
ndevices = g_hash_table_size(device_table);
crm_info("Device '%s' not found (%u active device%s)", id, ndevices,
pcmk__plural_s(ndevices));
return;
}
if (from_cib) {
fenced_device_clear_flags(device, fenced_df_cib_registered);
} else {
fenced_device_clear_flags(device,
fenced_df_api_registered|fenced_df_verified);
}
if (!pcmk_any_flags_set(device->flags,
fenced_df_api_registered
|fenced_df_cib_registered)) {
g_hash_table_remove(device_table, id);
ndevices = g_hash_table_size(device_table);
crm_info("Removed '%s' from device list (%u active device%s)",
id, ndevices, pcmk__plural_s(ndevices));
} else {
        // Exactly one registration flag is still set at this point
const bool cib_registered = pcmk_is_set(device->flags,
fenced_df_cib_registered);
crm_trace("Not removing '%s' from device list (%u active) because "
"still registered via %s",
id, g_hash_table_size(device_table),
(cib_registered? "CIB" : "API"));
}
}
/*!
* \internal
* \brief Return the number of stonith levels registered for a node
*
* \param[in] tp Node's topology table entry
*
* \return Number of non-NULL levels in topology entry
* \note This function is used only for log messages.
*/
static int
count_active_levels(const stonith_topology_t *tp)
{
int lpc = 0;
int count = 0;
for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) {
if (tp->levels[lpc] != NULL) {
count++;
}
}
return count;
}
static void
free_topology_entry(gpointer data)
{
stonith_topology_t *tp = data;
int lpc = 0;
for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) {
if (tp->levels[lpc] != NULL) {
g_list_free_full(tp->levels[lpc], free);
}
}
free(tp->target);
free(tp->target_value);
free(tp->target_pattern);
free(tp->target_attribute);
free(tp);
}
void
free_topology_list(void)
{
if (topology != NULL) {
g_hash_table_destroy(topology);
topology = NULL;
}
}
void
init_topology_list(void)
{
if (topology == NULL) {
topology = pcmk__strkey_table(NULL, free_topology_entry);
}
}
char *
stonith_level_key(const xmlNode *level, enum fenced_target_by mode)
{
if (mode == fenced_target_by_unknown) {
mode = unpack_level_kind(level);
}
switch (mode) {
case fenced_target_by_name:
return crm_element_value_copy(level, PCMK_XA_TARGET);
case fenced_target_by_pattern:
return crm_element_value_copy(level, PCMK_XA_TARGET_PATTERN);
case fenced_target_by_attribute:
return crm_strdup_printf("%s=%s",
crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE),
crm_element_value(level, PCMK_XA_TARGET_VALUE));
default:
return crm_strdup_printf("unknown-%s", pcmk__xe_id(level));
}
}
/*!
* \internal
* \brief Parse target identification from topology level XML
*
* \param[in] level Topology level XML to parse
*
* \return How to identify target of \p level
*/
static enum fenced_target_by
unpack_level_kind(const xmlNode *level)
{
if (crm_element_value(level, PCMK_XA_TARGET) != NULL) {
return fenced_target_by_name;
}
if (crm_element_value(level, PCMK_XA_TARGET_PATTERN) != NULL) {
return fenced_target_by_pattern;
}
if ((crm_element_value(level, PCMK_XA_TARGET_ATTRIBUTE) != NULL)
&& (crm_element_value(level, PCMK_XA_TARGET_VALUE) != NULL)) {
return fenced_target_by_attribute;
}
return fenced_target_by_unknown;
}
/*!
* \internal
* \brief Unpack essential information from topology request XML
*
* \param[in] xml Request XML to search
* \param[out] mode If not NULL, where to store level kind
* \param[out] target If not NULL, where to store representation of target
* \param[out] id If not NULL, where to store level number
*
* \return Topology level XML from within \p xml, or NULL if not found
* \note The caller is responsible for freeing \p *target if set.
*/
static xmlNode *
unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target,
int *id)
{
enum fenced_target_by local_mode = fenced_target_by_unknown;
char *local_target = NULL;
int local_id = 0;
/* The level element can be the top element or lower. If top level, don't
* search by xpath, because it might give multiple hits if the XML is the
* CIB.
*/
if ((xml != NULL) && !pcmk__xe_is(xml, PCMK_XE_FENCING_LEVEL)) {
xml = pcmk__xpath_find_one(xml->doc, "//" PCMK_XE_FENCING_LEVEL,
LOG_WARNING);
}
if (xml != NULL) {
local_mode = unpack_level_kind(xml);
local_target = stonith_level_key(xml, local_mode);
crm_element_value_int(xml, PCMK_XA_INDEX, &local_id);
}
if (mode != NULL) {
*mode = local_mode;
}
if (id != NULL) {
*id = local_id;
}
if (target != NULL) {
*target = local_target;
} else {
free(local_target);
}
return xml;
}
/*!
* \internal
* \brief Register a fencing topology level for a target
*
* Given an XML request specifying the target name, level index, and device IDs
* for the level, this will create an entry for the target in the global topology
* table if one does not already exist, then append the specified device IDs to
* the entry's device list for the specified level.
*
* \param[in] msg XML request for STONITH level registration
* \param[out] result Where to set result of registration (can be \c NULL)
*/
void
fenced_register_level(xmlNode *msg, pcmk__action_result_t *result)
{
int id = 0;
xmlNode *level;
enum fenced_target_by mode;
char *target;
stonith_topology_t *tp;
const char *value = NULL;
CRM_CHECK(msg != NULL, return);
level = unpack_level_request(msg, &mode, &target, &id);
if (level == NULL) {
set_bad_request_result(result);
return;
}
// Ensure an ID was given (even the client API adds an ID)
if (pcmk__str_empty(pcmk__xe_id(level))) {
crm_warn("Ignoring registration for topology level without ID");
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Topology level is invalid without ID");
return;
}
// Ensure a valid target was specified
if (mode == fenced_target_by_unknown) {
crm_warn("Ignoring registration for topology level '%s' "
"without valid target", pcmk__xe_id(level));
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Invalid target for topology level '%s'",
pcmk__xe_id(level));
return;
}
// Ensure level ID is in allowed range
if ((id < ST__LEVEL_MIN) || (id > ST__LEVEL_MAX)) {
crm_warn("Ignoring topology registration for %s with invalid level %d",
target, id);
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Invalid level number '%s' for topology level '%s'",
pcmk__s(crm_element_value(level, PCMK_XA_INDEX),
""),
pcmk__xe_id(level));
return;
}
/* Find or create topology table entry */
tp = g_hash_table_lookup(topology, target);
if (tp == NULL) {
tp = pcmk__assert_alloc(1, sizeof(stonith_topology_t));
tp->kind = mode;
tp->target = target;
tp->target_value = crm_element_value_copy(level, PCMK_XA_TARGET_VALUE);
tp->target_pattern = crm_element_value_copy(level,
PCMK_XA_TARGET_PATTERN);
tp->target_attribute = crm_element_value_copy(level,
PCMK_XA_TARGET_ATTRIBUTE);
g_hash_table_replace(topology, tp->target, tp);
crm_trace("Added %s (%d) to the topology (%d active entries)",
target, (int) mode, g_hash_table_size(topology));
} else {
free(target);
}
if (tp->levels[id] != NULL) {
crm_info("Adding to the existing %s[%d] topology entry",
tp->target, id);
}
value = crm_element_value(level, PCMK_XA_DEVICES);
if (value != NULL) {
/* Empty string and whitespace are not possible with schema validation
* enabled. Don't bother handling them specially here.
*/
gchar **devices = g_strsplit(value, ",", 0);
for (char **dev = devices; (dev != NULL) && (*dev != NULL); dev++) {
crm_trace("Adding device '%s' for %s[%d]", *dev, tp->target, id);
tp->levels[id] = g_list_append(tp->levels[id],
pcmk__str_copy(*dev));
}
g_strfreev(devices);
}
{
int nlevels = count_active_levels(tp);
crm_info("Target %s has %d active fencing level%s",
tp->target, nlevels, pcmk__plural_s(nlevels));
}
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
}
/*!
* \internal
* \brief Unregister a fencing topology level for a target
*
* Given an XML request specifying the target name and level index (or 0 for all
* levels), this will remove any corresponding entry for the target from the
* global topology table.
*
 * \param[in] msg XML request for STONITH level unregistration
* \param[out] result Where to set result of unregistration (can be \c NULL)
*/
void
fenced_unregister_level(xmlNode *msg, pcmk__action_result_t *result)
{
int id = -1;
stonith_topology_t *tp;
char *target;
xmlNode *level = NULL;
level = unpack_level_request(msg, NULL, &target, &id);
if (level == NULL) {
set_bad_request_result(result);
return;
}
// Ensure level ID is in allowed range
if ((id < 0) || (id >= ST__LEVEL_COUNT)) {
crm_warn("Ignoring topology unregistration for %s with invalid level %d",
target, id);
free(target);
crm_log_xml_trace(level, "Bad level");
pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID,
"Invalid level number '%s' for topology level %s",
pcmk__s(crm_element_value(level, PCMK_XA_INDEX),
"<null>"),
// Client API doesn't add ID to unregistration XML
pcmk__s(pcmk__xe_id(level), ""));
return;
}
tp = g_hash_table_lookup(topology, target);
if (tp == NULL) {
guint nentries = g_hash_table_size(topology);
crm_info("No fencing topology found for %s (%d active %s)",
target, nentries,
pcmk__plural_alt(nentries, "entry", "entries"));
} else if (id == 0 && g_hash_table_remove(topology, target)) {
guint nentries = g_hash_table_size(topology);
crm_info("Removed all fencing topology entries related to %s "
"(%d active %s remaining)", target, nentries,
pcmk__plural_alt(nentries, "entry", "entries"));
} else if (tp->levels[id] != NULL) {
guint nlevels;
g_list_free_full(tp->levels[id], free);
tp->levels[id] = NULL;
nlevels = count_active_levels(tp);
crm_info("Removed level %d from fencing topology for %s "
"(%d active level%s remaining)",
id, target, nlevels, pcmk__plural_s(nlevels));
}
free(target);
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
}
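/*!
 * \internal
 * \brief Join a list of strings into one newly allocated, delimited string
 *
 * \param[in] list                  List of strings to join
 * \param[in] delim                 Delimiter to place between entries
 * \param[in] terminate_with_delim  Whether to append the delimiter after the
 *                                  final entry as well
 *
 * \return Newly allocated string (the caller is responsible for freeing it)
 */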
static char *
list_to_string(GList *list, const char *delim, gboolean terminate_with_delim)
{
int max = g_list_length(list);
size_t delim_len = delim?strlen(delim):0;
size_t alloc_size = 1 + (max?((max-1+(terminate_with_delim?1:0))*delim_len):0);
char *rv;
GList *gIter;
char *pos = NULL;
const char *lead_delim = "";
for (gIter = list; gIter != NULL; gIter = gIter->next) {
const char *value = (const char *) gIter->data;
alloc_size += strlen(value);
}
rv = pcmk__assert_alloc(alloc_size, sizeof(char));
pos = rv;
for (gIter = list; gIter != NULL; gIter = gIter->next) {
const char *value = (const char *) gIter->data;
pos = &pos[sprintf(pos, "%s%s", lead_delim, value)];
lead_delim = delim;
}
if (max && terminate_with_delim) {
sprintf(pos, "%s", delim);
}
return rv;
}
/*!
* \internal
* \brief Execute a fence agent action directly (and asynchronously)
*
* Handle a STONITH_OP_EXEC API message by scheduling a requested agent action
* directly on a specified device. Only list, monitor, and status actions are
* expected to use this call, though it should work with any agent command.
*
* \param[in] msg Request XML specifying action
* \param[out] result Where to store result of action
*
* \note If the action is monitor, the device must be registered via the API
* (CIB registration is not sufficient), because monitor should not be
* possible unless the device is "started" (API registered).
*/
static void
execute_agent_action(xmlNode *msg, pcmk__action_result_t *result)
{
xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//" PCMK__XE_ST_DEVICE_ID,
LOG_ERR);
xmlNode *op = pcmk__xpath_find_one(msg->doc,
"//*[@" PCMK__XA_ST_DEVICE_ACTION "]",
LOG_ERR);
const char *id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
const char *action = crm_element_value(op, PCMK__XA_ST_DEVICE_ACTION);
async_command_t *cmd = NULL;
fenced_device_t *device = NULL;
if ((id == NULL) || (action == NULL)) {
crm_info("Malformed API action request: device %s, action %s",
(id? id : "not specified"),
(action? action : "not specified"));
set_bad_request_result(result);
return;
}
if (pcmk__str_eq(id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
// Watchdog agent actions are implemented internally
if (stonith_watchdog_timeout_ms <= 0) {
pcmk__set_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"Watchdog fence device not configured");
return;
} else if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) {
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
pcmk__set_result_output(result,
list_to_string(stonith_watchdog_targets,
"\n", TRUE),
NULL);
return;
} else if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_none)) {
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return;
}
}
device = g_hash_table_lookup(device_table, id);
if (device == NULL) {
crm_info("Ignoring API '%s' action request because device %s not found",
action, id);
pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"'%s' not found", id);
return;
} else if (!pcmk_is_set(device->flags, fenced_df_api_registered)
&& (strcmp(action, PCMK_ACTION_MONITOR) == 0)) {
// Monitors may run only on "started" (API-registered) devices
crm_info("Ignoring API '%s' action request because device %s not active",
action, id);
pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"'%s' not active", id);
return;
}
cmd = create_async_command(msg);
if (cmd == NULL) {
crm_log_xml_warn(msg, "invalid");
set_bad_request_result(result);
return;
}
schedule_stonith_command(cmd, device);
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL);
}
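/*!
 * \internal
 * \brief Record one device's answer in a capable-device search
 *
 * Add the device to the search's list of capable devices if appropriate. Once
 * all expected replies have arrived, invoke the search callback with the list
 * and free the search.
 *
 * \param[in,out] search     Device search to update
 * \param[in]     device     ID of device that replied, or NULL if the device
 *                           is unavailable
 * \param[in]     can_fence  Whether the device can fence the search target
 */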
static void
search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence)
{
search->replies_received++;
if (can_fence && device) {
if (search->support_action_only != fenced_df_none) {
fenced_device_t *dev = g_hash_table_lookup(device_table, device);
if (dev && !pcmk_is_set(dev->flags, search->support_action_only)) {
return;
}
}
search->capable = g_list_append(search->capable,
pcmk__str_copy(device));
}
if (search->replies_needed == search->replies_received) {
guint ndevices = g_list_length(search->capable);
crm_debug("Search found %d device%s that can perform '%s' targeting %s",
ndevices, pcmk__plural_s(ndevices),
(search->action? search->action : "unknown action"),
(search->host? search->host : "any node"));
search->callback(search->capable, search->user_data);
free(search->host);
free(search->action);
free(search);
}
}
/*!
* \internal
* \brief Check whether the local host is allowed to execute a fencing action
*
* \param[in] device Fence device to check
* \param[in] action Fence action to check
* \param[in] target Hostname of fence target
* \param[in] allow_self Whether self-fencing is allowed for this operation
*
* \return TRUE if local host is allowed to execute action, FALSE otherwise
*/
static gboolean
localhost_is_eligible(const fenced_device_t *device, const char *action,
const char *target, gboolean allow_self)
{
gboolean localhost_is_target = pcmk__str_eq(target, fenced_get_local_node(),
pcmk__str_casei);
if ((device != NULL) && (action != NULL)
&& (device->on_target_actions != NULL)
&& (strstr((const char*) device->on_target_actions->str,
action) != NULL)) {
if (!localhost_is_target) {
crm_trace("Operation '%s' using %s can only be executed for local "
"host, not %s", action, device->id, target);
return FALSE;
}
} else if (localhost_is_target && !allow_self) {
crm_trace("'%s' operation does not support self-fencing", action);
return FALSE;
}
return TRUE;
}
/*!
* \internal
* \brief Check if local node is allowed to execute (possibly remapped) action
*
* \param[in] device Fence device to check
* \param[in] action Fence action to check
* \param[in] target Node name of fence target
* \param[in] allow_self Whether self-fencing is allowed for this operation
*
* \return true if local node is allowed to execute \p action or any actions it
* might be remapped to, otherwise false
*/
static bool
localhost_is_eligible_with_remap(const fenced_device_t *device,
const char *action, const char *target,
gboolean allow_self)
{
// Check exact action
if (localhost_is_eligible(device, action, target, allow_self)) {
return true;
}
// Check potential remaps
if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* "reboot" might get remapped to "off" then "on", so even if reboot is
* disallowed, return true if either of those is allowed. We'll report
* the disallowed actions with the results. We never allow self-fencing
* for remapped "on" actions because the target is off at that point.
*/
if (localhost_is_eligible(device, PCMK_ACTION_OFF, target, allow_self)
|| localhost_is_eligible(device, PCMK_ACTION_ON, target, FALSE)) {
return true;
}
}
return false;
}
/*!
* \internal
* \brief Check whether we can use a device's cached target list
*
* \param[in] dev Fencing device to check
*
* \return \c true if \p dev cached its targets less than a minute ago,
* otherwise \c false
*/
static inline bool
can_use_target_cache(const fenced_device_t *dev)
{
return (dev->targets != NULL) && (time(NULL) < (dev->targets_age + 60));
}
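/*!
 * \internal
 * \brief Check whether a device can fence a search's target
 *
 * Depending on the device's pcmk_host_check setting, either answer the search
 * immediately or schedule an asynchronous "list" or "status" command whose
 * callback will record the result instead.
 *
 * \param[in,out] dev     Device to check
 * \param[in,out] search  Device search to record the result in
 */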
static void
can_fence_host_with_device(fenced_device_t *dev,
struct device_search_s *search)
{
gboolean can = FALSE;
const char *check_type = "Internal bug";
const char *target = NULL;
const char *alias = NULL;
const char *dev_id = "Unspecified device";
const char *action = (search == NULL)? NULL : search->action;
CRM_CHECK((dev != NULL) && (action != NULL), goto search_report_results);
if (dev->id != NULL) {
dev_id = dev->id;
}
target = search->host;
if (target == NULL) {
can = TRUE;
check_type = "No target";
goto search_report_results;
}
/* Answer immediately if the device does not support the action
* or the local node is not allowed to perform it
*/
if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)
&& !pcmk_is_set(dev->flags, fenced_df_supports_on)) {
check_type = "Agent does not support 'on'";
goto search_report_results;
} else if (!localhost_is_eligible_with_remap(dev, action, target,
search->allow_self)) {
check_type = "This node is not allowed to execute action";
goto search_report_results;
}
// Check eligibility as specified by pcmk_host_check
check_type = target_list_type(dev);
alias = g_hash_table_lookup(dev->aliases, target);
if (pcmk__str_eq(check_type, PCMK_VALUE_NONE, pcmk__str_casei)) {
can = TRUE;
} else if (pcmk__str_eq(check_type, PCMK_VALUE_STATIC_LIST,
pcmk__str_casei)) {
if (pcmk__str_in_list(target, dev->targets, pcmk__str_casei)) {
can = TRUE;
} else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)
&& g_hash_table_lookup(dev->aliases, target)) {
can = TRUE;
}
} else if (pcmk__str_eq(check_type, PCMK_VALUE_DYNAMIC_LIST,
pcmk__str_casei)) {
if (!can_use_target_cache(dev)) {
int device_timeout = get_action_timeout(dev, PCMK_ACTION_LIST,
search->per_device_timeout);
if (device_timeout > search->per_device_timeout) {
crm_notice("Since the pcmk_list_timeout (%ds) parameter of %s "
"is larger than " PCMK_OPT_STONITH_TIMEOUT
" (%ds), timeout may occur",
device_timeout, dev_id, search->per_device_timeout);
}
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev_id, target, action);
schedule_internal_command(__func__, dev, PCMK_ACTION_LIST, NULL,
search->per_device_timeout, search, dynamic_list_search_cb);
            /* The callback will respond to this search asynchronously */
return;
}
if (pcmk__str_in_list(((alias == NULL)? target : alias), dev->targets,
pcmk__str_casei)) {
can = TRUE;
}
} else if (pcmk__str_eq(check_type, PCMK_VALUE_STATUS, pcmk__str_casei)) {
int device_timeout = get_action_timeout(dev, check_type, search->per_device_timeout);
if (device_timeout > search->per_device_timeout) {
crm_notice("Since the pcmk_status_timeout (%ds) parameter of %s is "
"larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), "
"timeout may occur",
device_timeout, dev_id, search->per_device_timeout);
}
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev_id, target, action);
schedule_internal_command(__func__, dev, PCMK_ACTION_STATUS, target,
search->per_device_timeout, search, status_search_cb);
        /* The callback will respond to this search asynchronously */
return;
} else {
crm_err("Invalid value for " PCMK_STONITH_HOST_CHECK ": %s", check_type);
check_type = "Invalid " PCMK_STONITH_HOST_CHECK;
}
search_report_results:
crm_info("%s is%s eligible to fence (%s) %s%s%s%s: %s",
dev_id, (can? "" : " not"), pcmk__s(action, "unspecified action"),
pcmk__s(target, "unspecified target"),
(alias == NULL)? "" : " (as '", pcmk__s(alias, ""),
(alias == NULL)? "" : "')", check_type);
search_devices_record_result(search, ((dev == NULL)? NULL : dev_id), can);
}
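// Helper for checking each registered device during a capable-device search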
static void
search_devices(gpointer key, gpointer value, gpointer user_data)
{
fenced_device_t *dev = value;
struct device_search_s *search = user_data;
can_fence_host_with_device(dev, search);
}
#define DEFAULT_QUERY_TIMEOUT 20
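/*!
 * \internal
 * \brief Ask every registered device whether it can fence a target
 *
 * Allocate a device search and check each registered device against it. The
 * callback receives the list of capable device IDs once every device has
 * answered (immediately with a NULL list if no devices are registered).
 *
 * \param[in]     host                 Target host, or NULL for any node
 * \param[in]     action               Fence action to check
 * \param[in]     timeout              Per-device timeout in seconds
 * \param[in]     allow_self           Whether self-fencing is allowed
 * \param[in,out] user_data            Data to pass to \p callback
 * \param[in]     callback             Function to call with the search results
 * \param[in]     support_action_only  If not fenced_df_none, only devices with
 *                                     this flag set are considered capable
 */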
static void
get_capable_devices(const char *host, const char *action, int timeout,
bool allow_self, void *user_data,
void (*callback) (GList * devices, void *user_data),
uint32_t support_action_only)
{
struct device_search_s *search;
guint ndevices = g_hash_table_size(device_table);
if (ndevices == 0) {
callback(NULL, user_data);
return;
}
search = pcmk__assert_alloc(1, sizeof(struct device_search_s));
search->host = pcmk__str_copy(host);
search->action = pcmk__str_copy(action);
search->per_device_timeout = timeout;
search->allow_self = allow_self;
search->callback = callback;
search->user_data = user_data;
search->support_action_only = support_action_only;
/* We are guaranteed this many replies, even if a device is
* unregistered while the search is in progress.
*/
search->replies_needed = ndevices;
crm_debug("Searching %d device%s to see which can execute '%s' targeting %s",
ndevices, pcmk__plural_s(ndevices),
(search->action? search->action : "unknown action"),
(search->host? search->host : "any node"));
fenced_foreach_device(search_devices, search);
}
struct st_query_data {
xmlNode *reply;
char *remote_peer;
char *client_id;
char *target;
char *action;
int call_options;
};
/*!
* \internal
* \brief Add action-specific attributes to query reply XML
*
* \param[in,out] xml XML to add attributes to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
*/
static void
add_action_specific_attributes(xmlNode *xml, const char *action,
const fenced_device_t *device,
const char *target)
{
int action_specific_timeout;
int delay_max;
int delay_base;
CRM_CHECK(xml && action && device, return);
// PCMK__XA_ST_REQUIRED is currently used only for unfencing
if (is_action_required(action, device)) {
crm_trace("Action '%s' is required using %s", action, device->id);
crm_xml_add_int(xml, PCMK__XA_ST_REQUIRED, 1);
}
// pcmk_<action>_timeout if configured
action_specific_timeout = get_action_timeout(device, action, 0);
if (action_specific_timeout) {
crm_trace("Action '%s' has timeout %ds using %s",
action, action_specific_timeout, device->id);
crm_xml_add_int(xml, PCMK__XA_ST_ACTION_TIMEOUT,
action_specific_timeout);
}
delay_max = get_action_delay_max(device, action);
if (delay_max > 0) {
crm_trace("Action '%s' has maximum random delay %ds using %s",
action, delay_max, device->id);
crm_xml_add_int(xml, PCMK__XA_ST_DELAY_MAX, delay_max);
}
delay_base = get_action_delay_base(device, action, target);
if (delay_base > 0) {
crm_xml_add_int(xml, PCMK__XA_ST_DELAY_BASE, delay_base);
}
if ((delay_max > 0) && (delay_base == 0)) {
crm_trace("Action '%s' has maximum random delay %ds using %s",
action, delay_max, device->id);
} else if ((delay_max == 0) && (delay_base > 0)) {
crm_trace("Action '%s' has a static delay of %ds using %s",
action, delay_base, device->id);
} else if ((delay_max > 0) && (delay_base > 0)) {
crm_trace("Action '%s' has a minimum delay of %ds and a randomly chosen "
"maximum delay of %ds using %s",
action, delay_base, delay_max, device->id);
}
}
/*!
* \internal
* \brief Add "disallowed" attribute to query reply XML if appropriate
*
* \param[in,out] xml XML to add attribute to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
* \param[in] allow_self Whether self-fencing is allowed
*/
static void
add_disallowed(xmlNode *xml, const char *action, const fenced_device_t *device,
const char *target, gboolean allow_self)
{
if (!localhost_is_eligible(device, action, target, allow_self)) {
crm_trace("Action '%s' using %s is disallowed for local host",
action, device->id);
pcmk__xe_set_bool_attr(xml, PCMK__XA_ST_ACTION_DISALLOWED, true);
}
}
/*!
* \internal
* \brief Add child element with action-specific values to query reply XML
*
* \param[in,out] xml XML to add attribute to
* \param[in] action Fence action
* \param[in] device Fence device
* \param[in] target Fence target
* \param[in] allow_self Whether self-fencing is allowed
*/
static void
add_action_reply(xmlNode *xml, const char *action,
const fenced_device_t *device, const char *target,
gboolean allow_self)
{
xmlNode *child = pcmk__xe_create(xml, PCMK__XE_ST_DEVICE_ACTION);
crm_xml_add(child, PCMK_XA_ID, action);
add_action_specific_attributes(child, action, device, target);
add_disallowed(child, action, device, target, allow_self);
}
/*!
* \internal
* \brief Send a reply to a CPG peer or IPC client
*
* \param[in] reply XML reply to send
* \param[in] call_options Send synchronously if st_opt_sync_call is set
* \param[in] remote_peer If not NULL, name of peer node to send CPG reply
* \param[in,out] client If not NULL, client to send IPC reply
*/
static void
stonith_send_reply(const xmlNode *reply, int call_options,
const char *remote_peer, pcmk__client_t *client)
{
CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)),
return);
if (remote_peer == NULL) {
do_local_reply(reply, client, call_options);
} else {
const pcmk__node_status_t *node =
pcmk__get_node(0, remote_peer, NULL,
pcmk__node_search_cluster_member);
pcmk__cluster_send_message(node, pcmk_ipc_fenced, reply);
}
}
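/*!
 * \internal
 * \brief Pack the results of a capable-device search into a query reply
 *
 * Build reply XML listing each capable device along with its action-specific
 * values, then send the reply to the requester and free the query data.
 */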
static void
stonith_query_capable_device_cb(GList * devices, void *user_data)
{
struct st_query_data *query = user_data;
int available_devices = 0;
xmlNode *wrapper = NULL;
xmlNode *list = NULL;
GList *lpc = NULL;
pcmk__client_t *client = NULL;
if (query->client_id != NULL) {
client = pcmk__find_client_by_id(query->client_id);
if ((client == NULL) && (query->remote_peer == NULL)) {
crm_trace("Skipping reply to %s: no longer a client",
query->client_id);
goto done;
}
}
// Pack the results into XML
wrapper = pcmk__xe_create(query->reply, PCMK__XE_ST_CALLDATA);
list = pcmk__xe_create(wrapper, __func__);
crm_xml_add(list, PCMK__XA_ST_TARGET, query->target);
for (lpc = devices; lpc != NULL; lpc = lpc->next) {
fenced_device_t *device = g_hash_table_lookup(device_table, lpc->data);
const char *action = query->action;
xmlNode *dev = NULL;
if (!device) {
/* It is possible the device got unregistered while
* determining who can fence the target */
continue;
}
available_devices++;
dev = pcmk__xe_create(list, PCMK__XE_ST_DEVICE_ID);
crm_xml_add(dev, PCMK_XA_ID, device->id);
crm_xml_add(dev, PCMK__XA_NAMESPACE, device->namespace);
crm_xml_add(dev, PCMK_XA_AGENT, device->agent);
// Has had successful monitor, list, or status on this node
crm_xml_add_int(dev, PCMK__XA_ST_MONITOR_VERIFIED,
pcmk_is_set(device->flags, fenced_df_verified));
crm_xml_add_int(dev, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, device->flags);
/* If the originating fencer wants to reboot the node, and we have a
* capable device that doesn't support "reboot", remap to "off" instead.
*/
if (!pcmk_is_set(device->flags, fenced_df_supports_reboot)
&& pcmk__str_eq(query->action, PCMK_ACTION_REBOOT,
pcmk__str_none)) {
crm_trace("%s doesn't support reboot, using values for off instead",
device->id);
action = PCMK_ACTION_OFF;
}
/* Add action-specific values if available */
add_action_specific_attributes(dev, action, device, query->target);
if (pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* A "reboot" *might* get remapped to "off" then "on", so after
* sending the "reboot"-specific values in the main element, we add
* sub-elements for "off" and "on" values.
*
* We short-circuited earlier if "reboot", "off" and "on" are all
* disallowed for the local host. However if only one or two are
* disallowed, we send back the results and mark which ones are
* disallowed. If "reboot" is disallowed, this might cause problems
* with older fencer versions, which won't check for it. Older
* versions will ignore "off" and "on", so they are not a problem.
*/
add_disallowed(dev, action, device, query->target,
pcmk_is_set(query->call_options,
st_opt_allow_self_fencing));
add_action_reply(dev, PCMK_ACTION_OFF, device, query->target,
pcmk_is_set(query->call_options,
st_opt_allow_self_fencing));
add_action_reply(dev, PCMK_ACTION_ON, device, query->target, FALSE);
}
/* A query without a target wants device parameters */
if (query->target == NULL) {
xmlNode *attrs = pcmk__xe_create(dev, PCMK__XE_ATTRIBUTES);
g_hash_table_foreach(device->params, hash2field, attrs);
}
}
crm_xml_add_int(list, PCMK__XA_ST_AVAILABLE_DEVICES, available_devices);
if (query->target) {
crm_debug("Found %d matching device%s for target '%s'",
available_devices, pcmk__plural_s(available_devices),
query->target);
} else {
crm_debug("%d device%s installed",
available_devices, pcmk__plural_s(available_devices));
}
crm_log_xml_trace(list, "query-result");
stonith_send_reply(query->reply, query->call_options, query->remote_peer,
client);
done:
pcmk__xml_free(query->reply);
free(query->remote_peer);
free(query->client_id);
free(query->target);
free(query->action);
free(query);
g_list_free_full(devices, free);
}
/*!
* \internal
* \brief Log the result of an asynchronous command
*
* \param[in] cmd Command the result is for
* \param[in] result Result of command
* \param[in] pid Process ID of command, if available
* \param[in] next Alternate device that will be tried if command failed
* \param[in] op_merged Whether this command was merged with an earlier one
*/
static void
log_async_result(const async_command_t *cmd,
const pcmk__action_result_t *result,
int pid, const char *next, bool op_merged)
{
int log_level = LOG_ERR;
int output_log_level = LOG_NEVER;
guint devices_remaining = g_list_length(cmd->next_device_iter);
GString *msg = g_string_sized_new(80); // Reasonable starting size
// Choose log levels appropriately if we have a result
if (pcmk__result_ok(result)) {
log_level = (cmd->target == NULL)? LOG_DEBUG : LOG_NOTICE;
if ((result->action_stdout != NULL)
&& !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA,
pcmk__str_none)) {
output_log_level = LOG_DEBUG;
}
next = NULL;
} else {
log_level = (cmd->target == NULL)? LOG_NOTICE : LOG_ERR;
if ((result->action_stdout != NULL)
&& !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA,
pcmk__str_none)) {
output_log_level = LOG_WARNING;
}
}
// Build the log message piece by piece
pcmk__g_strcat(msg, "Operation '", cmd->action, "' ", NULL);
if (pid != 0) {
g_string_append_printf(msg, "[%d] ", pid);
}
if (cmd->target != NULL) {
pcmk__g_strcat(msg, "targeting ", cmd->target, " ", NULL);
}
if (cmd->device != NULL) {
pcmk__g_strcat(msg, "using ", cmd->device, " ", NULL);
}
// Add exit status or execution status as appropriate
if (result->execution_status == PCMK_EXEC_DONE) {
g_string_append_printf(msg, "returned %d", result->exit_status);
} else {
pcmk__g_strcat(msg, "could not be executed: ",
pcmk_exec_status_str(result->execution_status), NULL);
}
// Add exit reason and next device if appropriate
if (result->exit_reason != NULL) {
pcmk__g_strcat(msg, " (", result->exit_reason, ")", NULL);
}
if (next != NULL) {
pcmk__g_strcat(msg, ", retrying with ", next, NULL);
}
if (devices_remaining > 0) {
g_string_append_printf(msg, " (%u device%s remaining)",
(unsigned int) devices_remaining,
pcmk__plural_s(devices_remaining));
}
g_string_append_printf(msg, " " QB_XS " %scall %d from %s",
(op_merged? "merged " : ""), cmd->id,
cmd->client_name);
// Log the result
do_crm_log(log_level, "%s", msg->str);
g_string_free(msg, TRUE);
// Log the output (which may have multiple lines), if appropriate
if (output_log_level != LOG_NEVER) {
char *prefix = crm_strdup_printf("%s[%d]", cmd->device, pid);
crm_log_output(output_log_level, prefix, result->action_stdout);
free(prefix);
}
}
/*!
* \internal
* \brief Reply to requester after asynchronous command completion
*
* \param[in] cmd Command that completed
* \param[in] result Result of command
* \param[in] pid Process ID of command, if available
* \param[in] merged If true, command was merged with another, not executed
*/
static void
send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result,
int pid, bool merged)
{
xmlNode *reply = NULL;
pcmk__client_t *client = NULL;
CRM_CHECK((cmd != NULL) && (result != NULL), return);
log_async_result(cmd, result, pid, NULL, merged);
if (cmd->client != NULL) {
client = pcmk__find_client_by_id(cmd->client);
if ((client == NULL) && (cmd->origin == NULL)) {
crm_trace("Skipping reply to %s: no longer a client", cmd->client);
return;
}
}
reply = construct_async_reply(cmd, result);
if (merged) {
pcmk__xe_set_bool_attr(reply, PCMK__XA_ST_OP_MERGED, true);
}
if (pcmk__is_fencing_action(cmd->action)
&& pcmk__str_eq(cmd->origin, cmd->target, pcmk__str_casei)) {
/* The target was also the originator, so broadcast the result on its
* behalf (since it will be unable to).
*/
crm_trace("Broadcast '%s' result for %s (target was also originator)",
cmd->action, cmd->target);
crm_xml_add(reply, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST);
crm_xml_add(reply, PCMK__XA_ST_OP, STONITH_OP_NOTIFY);
pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, reply);
} else {
// Reply only to the originator
stonith_send_reply(reply, cmd->options, cmd->origin, client);
}
crm_log_xml_trace(reply, "Reply");
pcmk__xml_free(reply);
}
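/*!
 * \internal
 * \brief Remove a scheduled command from its device's list of pending operations
 *
 * \param[in,out] cmd  Command to cancel
 */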
static void
cancel_stonith_command(async_command_t * cmd)
{
fenced_device_t *device = cmd_device(cmd);
if (device) {
crm_trace("Cancel scheduled '%s' action using %s",
cmd->action, device->id);
device->pending_ops = g_list_remove(device->pending_ops, cmd);
}
}
/*!
* \internal
* \brief Cancel and reply to any duplicates of a just-completed operation
*
* Check whether any fencing operations are scheduled to do the same thing as
* one that just succeeded. If so, rather than performing the same operation
* twice, return the result of this operation for all matching pending commands.
*
* \param[in,out] cmd Fencing operation that just succeeded
* \param[in] result Result of \p cmd
* \param[in] pid If nonzero, process ID of agent invocation (for logs)
*
* \note Duplicate merging will do the right thing for either type of remapped
* reboot. If the executing fencer remapped an unsupported reboot to off,
* then cmd->action will be "reboot" and will be merged with any other
* reboot requests. If the originating fencer remapped a topology reboot
* to off then on, we will get here once with cmd->action "off" and once
* with "on", and they will be merged separately with similar requests.
*/
static void
reply_to_duplicates(async_command_t *cmd, const pcmk__action_result_t *result,
int pid)
{
GList *next = NULL;
for (GList *iter = cmd_list; iter != NULL; iter = next) {
async_command_t *cmd_other = iter->data;
next = iter->next; // We might delete this entry, so grab next now
if (cmd == cmd_other) {
continue;
}
/* A pending operation matches if:
* 1. The client connections are different.
* 2. The target is the same.
* 3. The fencing action is the same.
* 4. The device scheduled to execute the action is the same.
*/
if (pcmk__str_eq(cmd->client, cmd_other->client, pcmk__str_casei) ||
!pcmk__str_eq(cmd->target, cmd_other->target, pcmk__str_casei) ||
!pcmk__str_eq(cmd->action, cmd_other->action, pcmk__str_none) ||
!pcmk__str_eq(cmd->device, cmd_other->device, pcmk__str_casei)) {
continue;
}
crm_notice("Merging fencing action '%s'%s%s originating from "
"client %s with identical fencing request from client %s",
cmd_other->action,
(cmd_other->target == NULL)? "" : " targeting ",
pcmk__s(cmd_other->target, ""), cmd_other->client_name,
cmd->client_name);
// Stop tracking the duplicate, send its result, and cancel it
cmd_list = g_list_remove_link(cmd_list, iter);
send_async_reply(cmd_other, result, pid, true);
cancel_stonith_command(cmd_other);
free_async_command(cmd_other);
g_list_free_1(iter);
}
}
/*!
* \internal
* \brief Return the next required device (if any) for an operation
*
* \param[in,out] cmd Fencing operation that just succeeded
*
* \return Next device required for action if any, otherwise NULL
*/
static fenced_device_t *
next_required_device(async_command_t *cmd)
{
for (GList *iter = cmd->next_device_iter; iter != NULL; iter = iter->next) {
fenced_device_t *next_device = g_hash_table_lookup(device_table,
iter->data);
if (is_action_required(cmd->action, next_device)) {
/* This is only called for successful actions, so it's OK to skip
* non-required devices.
*/
cmd->next_device_iter = iter->next;
return next_device;
}
}
return NULL;
}
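/*!
 * \internal
 * \brief Handle the completion of an asynchronous fence agent action
 *
 * Mark the device as verified if appropriate, then either retry with the next
 * eligible device or send the final result (merging any duplicate requests).
 *
 * \param[in]     pid        Process ID of the agent invocation (for logs)
 * \param[in]     result     Result of the agent invocation
 * \param[in,out] user_data  Command that completed
 */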
static void
st_child_done(int pid, const pcmk__action_result_t *result, void *user_data)
{
async_command_t *cmd = user_data;
fenced_device_t *device = NULL;
fenced_device_t *next_device = NULL;
CRM_CHECK(cmd != NULL, return);
device = cmd_device(cmd);
cmd->active_on = NULL;
/* The device is ready to do something else now */
if (device) {
if (!pcmk_is_set(device->flags, fenced_df_verified)
&& pcmk__result_ok(result)
&& pcmk__strcase_any_of(cmd->action, PCMK_ACTION_LIST,
PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS,
NULL)) {
fenced_device_set_flags(device, fenced_df_verified);
}
mainloop_set_trigger(device->work);
}
if (pcmk__result_ok(result)) {
next_device = next_required_device(cmd);
} else if ((cmd->next_device_iter != NULL)
&& !is_action_required(cmd->action, device)) {
        /* If this device didn't work out, see if there are any others we can
         * try. If the failed device was 'required', we can't pick another
         * device.
         */
next_device = g_hash_table_lookup(device_table,
cmd->next_device_iter->data);
cmd->next_device_iter = cmd->next_device_iter->next;
}
if (next_device == NULL) {
send_async_reply(cmd, result, pid, false);
if (pcmk__result_ok(result)) {
reply_to_duplicates(cmd, result, pid);
}
free_async_command(cmd);
} else { // This operation requires more fencing
log_async_result(cmd, result, pid, next_device->id, false);
schedule_stonith_command(cmd, next_device);
}
}
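/*!
 * \internal
 * \brief Callback for a capable-device search run to execute a fencing request
 *
 * Schedule the fencing command on the first capable device found, or send an
 * error reply if no device can fence the target.
 */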
static void
stonith_fence_get_devices_cb(GList * devices, void *user_data)
{
async_command_t *cmd = user_data;
fenced_device_t *device = NULL;
guint ndevices = g_list_length(devices);
crm_info("Found %d matching device%s for target '%s'",
ndevices, pcmk__plural_s(ndevices), cmd->target);
if (devices != NULL) {
device = g_hash_table_lookup(device_table, devices->data);
}
if (device == NULL) { // No device found
pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
pcmk__format_result(&result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"No device configured for target '%s'",
cmd->target);
send_async_reply(cmd, &result, 0, false);
pcmk__reset_result(&result);
free_async_command(cmd);
g_list_free_full(devices, free);
} else {
/* Device found. Schedule a fencing command for it.
*
* Assign devices to device_list so that it will be freed with cmd.
*/
cmd->device_list = devices;
cmd->next_device_iter = devices->next;
schedule_stonith_command(cmd, device);
}
}
/*!
* \internal
* \brief Execute a fence action via the local node
*
* \param[in] msg Fencing request
* \param[out] result Where to store result of fence action
*/
static void
fence_locally(xmlNode *msg, pcmk__action_result_t *result)
{
const char *device_id = NULL;
fenced_device_t *device = NULL;
async_command_t *cmd = NULL;
xmlNode *dev = NULL;
CRM_CHECK((msg != NULL) && (result != NULL), return);
dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_TARGET "]",
LOG_ERR);
cmd = create_async_command(msg);
if (cmd == NULL) {
crm_log_xml_warn(msg, "invalid");
set_bad_request_result(result);
return;
}
device_id = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
if (device_id != NULL) {
device = g_hash_table_lookup(device_table, device_id);
if (device == NULL) {
crm_err("Requested device '%s' is not available", device_id);
pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE,
"Requested device '%s' not found", device_id);
return;
}
schedule_stonith_command(cmd, device);
} else {
const char *host = crm_element_value(dev, PCMK__XA_ST_TARGET);
if (pcmk_is_set(cmd->options, st_opt_cs_nodeid)) {
int nodeid = 0;
pcmk__node_status_t *node = NULL;
pcmk__scan_min_int(host, &nodeid, 0);
node = pcmk__search_node_caches(nodeid, NULL, NULL,
pcmk__node_search_any
|pcmk__node_search_cluster_cib);
if (node != NULL) {
host = node->name;
}
}
/* If we get to here, then self-fencing is implicitly allowed */
get_capable_devices(host, cmd->action, cmd->default_timeout,
TRUE, cmd, stonith_fence_get_devices_cb,
fenced_support_flag(cmd->action));
}
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL);
}
/*!
* \internal
* \brief Build an XML reply for a fencing operation
*
* \param[in] request Request that reply is for
* \param[in] data If not NULL, add to reply as call data
* \param[in] result Full result of fencing operation
*
* \return Newly created XML reply
* \note The caller is responsible for freeing the result.
* \note This has some overlap with construct_async_reply(), but that copies
* values from an async_command_t, whereas this one copies them from the
* request.
*/
xmlNode *
fenced_construct_reply(const xmlNode *request, xmlNode *data,
const pcmk__action_result_t *result)
{
xmlNode *reply = NULL;
reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);
crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__);
crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG);
stonith__xe_set_result(reply, result);
if (request == NULL) {
/* Most likely, this is the result of a stonith operation that was
* initiated before we came up. Unfortunately that means we lack enough
* information to provide clients with a full result.
*
* @TODO Maybe synchronize this information at start-up?
*/
crm_warn("Missing request information for client notifications for "
"operation with result '%s' (initiated before we came up?)",
pcmk_exec_status_str(result->execution_status));
} else {
const char *name = NULL;
const char *value = NULL;
// Attributes to copy from request to reply
const char *names[] = {
PCMK__XA_ST_OP,
PCMK__XA_ST_CALLID,
PCMK__XA_ST_CLIENTID,
PCMK__XA_ST_CLIENTNAME,
PCMK__XA_ST_REMOTE_OP,
PCMK__XA_ST_CALLOPT,
};
for (int lpc = 0; lpc < PCMK__NELEM(names); lpc++) {
name = names[lpc];
value = crm_element_value(request, name);
crm_xml_add(reply, name, value);
}
if (data != NULL) {
xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_ST_CALLDATA);
pcmk__xml_copy(wrapper, data);
}
}
return reply;
}
/*!
* \internal
* \brief Build an XML reply to an asynchronous fencing command
*
 * \param[in] cmd     Fencing command that reply is for
 * \param[in] result  Command result
 *
 * \return Newly created XML reply
 * \note The caller is responsible for freeing the return value.
 */
static xmlNode *
construct_async_reply(const async_command_t *cmd,
const pcmk__action_result_t *result)
{
xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);
crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__);
crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG);
crm_xml_add(reply, PCMK__XA_ST_OP, cmd->op);
crm_xml_add(reply, PCMK__XA_ST_DEVICE_ID, cmd->device);
crm_xml_add(reply, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id);
crm_xml_add(reply, PCMK__XA_ST_CLIENTID, cmd->client);
crm_xml_add(reply, PCMK__XA_ST_CLIENTNAME, cmd->client_name);
crm_xml_add(reply, PCMK__XA_ST_TARGET, cmd->target);
crm_xml_add(reply, PCMK__XA_ST_DEVICE_ACTION, cmd->op);
crm_xml_add(reply, PCMK__XA_ST_ORIGIN, cmd->origin);
crm_xml_add_int(reply, PCMK__XA_ST_CALLID, cmd->id);
crm_xml_add_int(reply, PCMK__XA_ST_CALLOPT, cmd->options);
stonith__xe_set_result(reply, result);
return reply;
}
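/*!
 * \internal
 * \brief Check whether a cluster peer is active for fencing purposes
 *
 * \param[in] peer  Peer node status entry to check
 *
 * \return true if \p peer has a node name and an active cluster process,
 *         otherwise false
 */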
bool
fencing_peer_active(pcmk__node_status_t *peer)
{
return (peer != NULL) && (peer->name != NULL)
&& pcmk_is_set(peer->processes, crm_get_cluster_proc());
}
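/*!
 * \internal
 * \brief Record the current time as a remote fencing operation's completion time
 *
 * \param[in,out] op  Remote fencing operation to update
 */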
void
set_fencing_completed(remote_fencing_op_t *op)
{
struct timespec tv;
qb_util_timespec_from_epoch_get(&tv);
op->completed = tv.tv_sec;
op->completed_nsec = tv.tv_nsec;
}
/*!
* \internal
* \brief Look for alternate node needed if local node shouldn't fence target
*
* \param[in] target Node that must be fenced
*
* \return Name of an alternate node that should fence \p target if any,
* or NULL otherwise
*/
static const char *
check_alternate_host(const char *target)
{
if (pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) {
GHashTableIter gIter;
pcmk__node_status_t *entry = NULL;
g_hash_table_iter_init(&gIter, pcmk__peer_cache);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
if (fencing_peer_active(entry)
&& !pcmk__str_eq(entry->name, target, pcmk__str_casei)) {
crm_notice("Forwarding self-fencing request to %s",
entry->name);
return entry->name;
}
}
crm_warn("Will handle own fencing because no peer can");
}
return NULL;
}
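/*!
 * \internal
 * \brief Delete a relayed fencing operation superseded by a new request
 *
 * If the request relays an earlier operation that targets the local node,
 * remove that operation (and any duplicate registrations of it) from the
 * remote operation table.
 *
 * \param[in] request  Request XML to check for a relay operation
 */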
static void
remove_relay_op(xmlNode * request)
{
xmlNode *dev = pcmk__xpath_find_one(request->doc,
"//*[@" PCMK__XA_ST_DEVICE_ACTION "]",
LOG_TRACE);
const char *relay_op_id = NULL;
const char *op_id = NULL;
const char *client_name = NULL;
const char *target = NULL;
remote_fencing_op_t *relay_op = NULL;
if (dev) {
target = crm_element_value(dev, PCMK__XA_ST_TARGET);
}
relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP_RELAY);
op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP);
client_name = crm_element_value(request, PCMK__XA_ST_CLIENTNAME);
/* Delete RELAY operation. */
if ((relay_op_id != NULL) && (target != NULL)
&& pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) {
relay_op = g_hash_table_lookup(stonith_remote_op_list, relay_op_id);
if (relay_op) {
GHashTableIter iter;
remote_fencing_op_t *list_op = NULL;
g_hash_table_iter_init(&iter, stonith_remote_op_list);
/* If the operation to be deleted is registered as a duplicate, delete the registration. */
while (g_hash_table_iter_next(&iter, NULL, (void **)&list_op)) {
GList *dup_iter = NULL;
if (list_op != relay_op) {
for (dup_iter = list_op->duplicates; dup_iter != NULL; dup_iter = dup_iter->next) {
remote_fencing_op_t *other = dup_iter->data;
if (other == relay_op) {
other->duplicates = g_list_remove(other->duplicates, relay_op);
break;
}
}
}
}
crm_debug("Deleting relay op %s ('%s'%s%s for %s), "
"replaced by op %s ('%s'%s%s for %s)",
relay_op->id, relay_op->action,
(relay_op->target == NULL)? "" : " targeting ",
pcmk__s(relay_op->target, ""),
relay_op->client_name, op_id, relay_op->action,
(target == NULL)? "" : " targeting ", pcmk__s(target, ""),
client_name);
g_hash_table_remove(stonith_remote_op_list, relay_op_id);
}
}
}
/*!
* \internal
* \brief Check whether an API request was sent by a privileged user
*
* API commands related to fencing configuration may be done only by privileged
* IPC users (i.e. root or hacluster), because all other users should go through
* the CIB to have ACLs applied. If no client was given, this is a peer request,
* which is always allowed.
*
* \param[in] c IPC client that sent request (or NULL if sent by CPG peer)
* \param[in] op Requested API operation (for logging only)
*
* \return true if sender is peer or privileged client, otherwise false
*/
static inline bool
is_privileged(const pcmk__client_t *c, const char *op)
{
if ((c == NULL) || pcmk_is_set(c->flags, pcmk__client_privileged)) {
return true;
} else {
crm_warn("Rejecting IPC request '%s' from unprivileged client %s",
pcmk__s(op, ""), pcmk__client_name(c));
return false;
}
}
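/* Illustrative sketch only (not part of this change): the configuration
 * handlers below all wrap their work in the same is_privileged() guard. The
 * handler name and its body here are hypothetical.
 */
#if 0
static xmlNode *
handle_example_config_request(pcmk__request_t *request)
{
    const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);

    if (is_privileged(request->ipc_client, op)) {
        // Privileged IPC client or cluster peer: perform the change
        pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
    } else {
        // Unprivileged clients must go through the CIB so ACLs apply
        pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
                         PCMK_EXEC_INVALID,
                         "Unprivileged users must make changes via the CIB");
    }
    return fenced_construct_reply(request->xml, NULL, &request->result);
}
#endif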
// CRM_OP_REGISTER
static xmlNode *
handle_register_request(pcmk__request_t *request)
{
xmlNode *reply = pcmk__xe_create(NULL, "reply");
pcmk__assert(request->ipc_client != NULL);
crm_xml_add(reply, PCMK__XA_ST_OP, CRM_OP_REGISTER);
crm_xml_add(reply, PCMK__XA_ST_CLIENTID, request->ipc_client->id);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
pcmk__set_request_flags(request, pcmk__request_reuse_options);
return reply;
}
// STONITH_OP_EXEC
static xmlNode *
handle_agent_request(pcmk__request_t *request)
{
execute_agent_action(request->xml, &request->result);
if (request->result.execution_status == PCMK_EXEC_PENDING) {
return NULL;
}
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_TIMEOUT_UPDATE
static xmlNode *
handle_update_timeout_request(pcmk__request_t *request)
{
const char *call_id = crm_element_value(request->xml, PCMK__XA_ST_CALLID);
const char *client_id = crm_element_value(request->xml,
PCMK__XA_ST_CLIENTID);
int op_timeout = 0;
crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &op_timeout);
do_stonith_async_timeout_update(client_id, call_id, op_timeout);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return NULL;
}
// STONITH_OP_QUERY
static xmlNode *
handle_query_request(pcmk__request_t *request)
{
int timeout = 0;
xmlNode *dev = NULL;
const char *action = NULL;
const char *target = NULL;
const char *client_id = crm_element_value(request->xml,
PCMK__XA_ST_CLIENTID);
struct st_query_data *query = NULL;
if (request->peer != NULL) {
// Record it for the future notification
create_remote_stonith_op(client_id, request->xml, TRUE);
}
/* Delete the DC node RELAY operation. */
remove_relay_op(request->xml);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
dev = pcmk__xpath_find_one(request->xml->doc,
"//*[@" PCMK__XA_ST_DEVICE_ACTION "]",
LOG_NEVER);
if (dev != NULL) {
const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
if (pcmk__str_eq(device, "manual_ack", pcmk__str_casei)) {
return NULL; // No query or reply necessary
}
target = crm_element_value(dev, PCMK__XA_ST_TARGET);
action = crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION);
}
crm_log_xml_trace(request->xml, "Query");
query = pcmk__assert_alloc(1, sizeof(struct st_query_data));
query->reply = fenced_construct_reply(request->xml, NULL, &request->result);
query->remote_peer = pcmk__str_copy(request->peer);
query->client_id = pcmk__str_copy(client_id);
query->target = pcmk__str_copy(target);
query->action = pcmk__str_copy(action);
query->call_options = request->call_options;
crm_element_value_int(request->xml, PCMK__XA_ST_TIMEOUT, &timeout);
get_capable_devices(target, action, timeout,
pcmk_is_set(query->call_options,
st_opt_allow_self_fencing),
query, stonith_query_capable_device_cb, fenced_df_none);
return NULL;
}
// STONITH_OP_NOTIFY
static xmlNode *
handle_notify_request(pcmk__request_t *request)
{
const char *flag_name = NULL;
pcmk__assert(request->ipc_client != NULL);
flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_ACTIVATE);
if (flag_name != NULL) {
crm_debug("Enabling %s callbacks for client %s",
flag_name, pcmk__request_origin(request));
pcmk__set_client_flags(request->ipc_client,
fenced_parse_notify_flag(flag_name));
}
flag_name = crm_element_value(request->xml, PCMK__XA_ST_NOTIFY_DEACTIVATE);
if (flag_name != NULL) {
crm_debug("Disabling %s callbacks for client %s",
flag_name, pcmk__request_origin(request));
pcmk__clear_client_flags(request->ipc_client,
fenced_parse_notify_flag(flag_name));
}
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
pcmk__set_request_flags(request, pcmk__request_reuse_options);
return pcmk__ipc_create_ack(request->ipc_flags, PCMK__XE_ACK, NULL,
CRM_EX_OK);
}
// STONITH_OP_RELAY
static xmlNode *
handle_relay_request(pcmk__request_t *request)
{
xmlNode *dev = pcmk__xpath_find_one(request->xml->doc,
"//*[@" PCMK__XA_ST_TARGET "]",
LOG_TRACE);
crm_notice("Received forwarded fencing request from "
"%s %s to fence (%s) peer %s",
pcmk__request_origin_type(request),
pcmk__request_origin(request),
crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION),
crm_element_value(dev, PCMK__XA_ST_TARGET));
if (initiate_remote_stonith_op(NULL, request->xml, FALSE) == NULL) {
set_bad_request_result(&request->result);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL);
return NULL;
}
// STONITH_OP_FENCE
static xmlNode *
handle_fence_request(pcmk__request_t *request)
{
if (request->peer != NULL) {
fence_locally(request->xml, &request->result);
} else if (pcmk_is_set(request->call_options, st_opt_manual_ack)) {
switch (fenced_handle_manual_confirmation(request->ipc_client,
request->xml)) {
case pcmk_rc_ok:
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE,
NULL);
break;
case EINPROGRESS:
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING,
NULL);
break;
default:
set_bad_request_result(&request->result);
break;
}
} else {
const char *alternate_host = NULL;
xmlNode *dev = pcmk__xpath_find_one(request->xml->doc,
"//*[@" PCMK__XA_ST_TARGET "]",
LOG_TRACE);
const char *target = crm_element_value(dev, PCMK__XA_ST_TARGET);
const char *action = crm_element_value(dev, PCMK__XA_ST_DEVICE_ACTION);
const char *device = crm_element_value(dev, PCMK__XA_ST_DEVICE_ID);
if (request->ipc_client != NULL) {
int tolerance = 0;
crm_notice("Client %s wants to fence (%s) %s using %s",
pcmk__request_origin(request), action,
target, (device? device : "any device"));
crm_element_value_int(dev, PCMK__XA_ST_TOLERANCE, &tolerance);
if (stonith_check_fence_tolerance(tolerance, target, action)) {
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE,
NULL);
return fenced_construct_reply(request->xml, NULL,
&request->result);
}
alternate_host = check_alternate_host(target);
} else {
crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'",
request->peer, action, target,
(device == NULL)? "(any)" : device);
}
if (alternate_host != NULL) {
const char *client_id = NULL;
remote_fencing_op_t *op = NULL;
pcmk__node_status_t *node =
pcmk__get_node(0, alternate_host, NULL,
pcmk__node_search_cluster_member);
if (request->ipc_client->id == 0) {
client_id = crm_element_value(request->xml,
PCMK__XA_ST_CLIENTID);
} else {
client_id = request->ipc_client->id;
}
/* Create a duplicate fencing operation to relay with the client ID.
* When a query response is received, this operation should be
* deleted to avoid keeping the duplicate around.
*/
op = create_remote_stonith_op(client_id, request->xml, FALSE);
crm_xml_add(request->xml, PCMK__XA_ST_OP, STONITH_OP_RELAY);
crm_xml_add(request->xml, PCMK__XA_ST_CLIENTID,
request->ipc_client->id);
crm_xml_add(request->xml, PCMK__XA_ST_REMOTE_OP, op->id);
// @TODO On failure, fail request immediately, or maybe panic
pcmk__cluster_send_message(node, pcmk_ipc_fenced, request->xml);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING,
NULL);
} else if (initiate_remote_stonith_op(request->ipc_client, request->xml,
FALSE) == NULL) {
set_bad_request_result(&request->result);
} else {
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING,
NULL);
}
}
if (request->result.execution_status == PCMK_EXEC_PENDING) {
return NULL;
}
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_FENCE_HISTORY
static xmlNode *
handle_history_request(pcmk__request_t *request)
{
xmlNode *reply = NULL;
xmlNode *data = NULL;
stonith_fence_history(request->xml, &data, request->peer,
request->call_options);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
if (!pcmk_is_set(request->call_options, st_opt_discard_reply)) {
/* When the local node broadcasts its history, it sets
* st_opt_discard_reply and doesn't need a reply.
*/
reply = fenced_construct_reply(request->xml, data, &request->result);
}
pcmk__xml_free(data);
return reply;
}
// STONITH_OP_DEVICE_ADD
static xmlNode *
handle_device_add_request(pcmk__request_t *request)
{
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
xmlNode *dev = pcmk__xpath_find_one(request->xml->doc,
"//" PCMK__XE_ST_DEVICE_ID, LOG_ERR);
if (is_privileged(request->ipc_client, op)) {
int rc = fenced_device_register(dev, false);
rc = pcmk_rc2legacy(rc);
pcmk__set_result(&request->result,
((rc == pcmk_ok)? CRM_EX_OK : CRM_EX_ERROR),
stonith__legacy2status(rc),
((rc == pcmk_ok)? NULL : pcmk_strerror(rc)));
} else {
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must register device via CIB");
}
fenced_send_config_notification(op, &request->result,
(dev == NULL)? NULL : pcmk__xe_id(dev));
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_DEVICE_DEL
static xmlNode *
handle_device_delete_request(pcmk__request_t *request)
{
xmlNode *dev = pcmk__xpath_find_one(request->xml->doc,
"//" PCMK__XE_ST_DEVICE_ID, LOG_ERR);
const char *device_id = crm_element_value(dev, PCMK_XA_ID);
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
if (is_privileged(request->ipc_client, op)) {
stonith_device_remove(device_id, false);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
} else {
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must delete device via CIB");
}
fenced_send_config_notification(op, &request->result, device_id);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_LEVEL_ADD
static xmlNode *
handle_level_add_request(pcmk__request_t *request)
{
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
if (is_privileged(request->ipc_client, op)) {
fenced_register_level(request->xml, &request->result);
} else {
unpack_level_request(request->xml, NULL, NULL, NULL);
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must add level via CIB");
}
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// STONITH_OP_LEVEL_DEL
static xmlNode *
handle_level_delete_request(pcmk__request_t *request)
{
const char *op = crm_element_value(request->xml, PCMK__XA_ST_OP);
if (is_privileged(request->ipc_client, op)) {
fenced_unregister_level(request->xml, &request->result);
} else {
unpack_level_request(request->xml, NULL, NULL, NULL);
pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV,
PCMK_EXEC_INVALID,
"Unprivileged users must delete level via CIB");
}
return fenced_construct_reply(request->xml, NULL, &request->result);
}
// CRM_OP_RM_NODE_CACHE
static xmlNode *
handle_cache_request(pcmk__request_t *request)
{
int node_id = 0;
const char *name = NULL;
crm_element_value_int(request->xml, PCMK_XA_ID, &node_id);
name = crm_element_value(request->xml, PCMK_XA_UNAME);
pcmk__cluster_forget_cluster_node(node_id, name);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return NULL;
}
static xmlNode *
handle_unknown_request(pcmk__request_t *request)
{
crm_err("Unknown IPC request %s from %s %s",
request->op, pcmk__request_origin_type(request),
pcmk__request_origin(request));
pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID,
"Unknown IPC request type '%s' (bug?)", request->op);
return fenced_construct_reply(request->xml, NULL, &request->result);
}
static void
fenced_register_handlers(void)
{
pcmk__server_command_t handlers[] = {
{ CRM_OP_REGISTER, handle_register_request },
{ STONITH_OP_EXEC, handle_agent_request },
{ STONITH_OP_TIMEOUT_UPDATE, handle_update_timeout_request },
{ STONITH_OP_QUERY, handle_query_request },
{ STONITH_OP_NOTIFY, handle_notify_request },
{ STONITH_OP_RELAY, handle_relay_request },
{ STONITH_OP_FENCE, handle_fence_request },
{ STONITH_OP_FENCE_HISTORY, handle_history_request },
{ STONITH_OP_DEVICE_ADD, handle_device_add_request },
{ STONITH_OP_DEVICE_DEL, handle_device_delete_request },
{ STONITH_OP_LEVEL_ADD, handle_level_add_request },
{ STONITH_OP_LEVEL_DEL, handle_level_delete_request },
{ CRM_OP_RM_NODE_CACHE, handle_cache_request },
{ NULL, handle_unknown_request },
};
fenced_handlers = pcmk__register_handlers(handlers);
}
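/* Illustrative sketch only (not part of this change): conceptually, dispatch
 * through the table above is a lookup by operation name, with the NULL entry
 * serving as the fallback. The helper name below is hypothetical.
 */
#if 0
static xmlNode *
dispatch_by_op_name(pcmk__request_t *request)
{
    // STONITH_OP_FENCE -> handle_fence_request(), etc.; any operation not
    // listed in the table lands in handle_unknown_request()
    return pcmk__process_request(request, fenced_handlers);
}
#endif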
void
fenced_unregister_handlers(void)
{
if (fenced_handlers != NULL) {
g_hash_table_destroy(fenced_handlers);
fenced_handlers = NULL;
}
}
static void
handle_request(pcmk__request_t *request)
{
xmlNode *reply = NULL;
const char *reason = NULL;
if (fenced_handlers == NULL) {
fenced_register_handlers();
}
reply = pcmk__process_request(request, fenced_handlers);
if (reply != NULL) {
if (pcmk_is_set(request->flags, pcmk__request_reuse_options)
&& (request->ipc_client != NULL)) {
/* Certain IPC-only commands must reuse the call options from the
* original request rather than the ones set by stonith_send_reply()
* -> do_local_reply().
*/
pcmk__ipc_send_xml(request->ipc_client, request->ipc_id, reply,
request->ipc_flags);
request->ipc_client->request_id = 0;
} else {
stonith_send_reply(reply, request->call_options,
request->peer, request->ipc_client);
}
pcmk__xml_free(reply);
}
reason = request->result.exit_reason;
crm_debug("Processed %s request from %s %s: %s%s%s%s",
request->op, pcmk__request_origin_type(request),
pcmk__request_origin(request),
pcmk_exec_status_str(request->result.execution_status),
(reason == NULL)? "" : " (",
(reason == NULL)? "" : reason,
(reason == NULL)? "" : ")");
}
static void
handle_reply(pcmk__client_t *client, xmlNode *request, const char *remote_peer)
{
// Copy, because request might be freed before we want to log this
char *op = crm_element_value_copy(request, PCMK__XA_ST_OP);
if (pcmk__str_eq(op, STONITH_OP_QUERY, pcmk__str_none)) {
process_remote_stonith_query(request);
} else if (pcmk__str_any_of(op, STONITH_OP_NOTIFY, STONITH_OP_FENCE,
NULL)) {
fenced_process_fencing_reply(request);
} else {
crm_err("Ignoring unknown %s reply from %s %s",
pcmk__s(op, "untyped"), ((client == NULL)? "peer" : "client"),
((client == NULL)? remote_peer : pcmk__client_name(client)));
crm_log_xml_warn(request, "UnknownOp");
free(op);
return;
}
crm_debug("Processed %s reply from %s %s",
op, ((client == NULL)? "peer" : "client"),
((client == NULL)? remote_peer : pcmk__client_name(client)));
free(op);
}
/*!
* \internal
* \brief Handle a message from an IPC client or CPG peer
*
* \param[in,out] client If not NULL, IPC client that sent message
* \param[in] id If from IPC client, IPC message ID
* \param[in] flags Message flags
* \param[in,out] message Message XML
* \param[in] remote_peer If not NULL, CPG peer that sent message
*/
void
stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags,
xmlNode *message, const char *remote_peer)
{
uint32_t call_options = st_opt_none;
int rc = pcmk_rc_ok;
bool is_reply = false;
CRM_CHECK(message != NULL, return);
if (pcmk__xpath_find_one(message->doc, "//" PCMK__XE_ST_REPLY,
LOG_NEVER) != NULL) {
is_reply = true;
}
rc = pcmk__xe_get_flags(message, PCMK__XA_ST_CALLOPT, &call_options,
st_opt_none);
if (rc != pcmk_rc_ok) {
crm_warn("Couldn't parse options from message: %s", pcmk_rc_str(rc));
}
crm_debug("Processing %ssynchronous %s %s %u from %s %s",
pcmk_is_set(call_options, st_opt_sync_call)? "" : "a",
crm_element_value(message, PCMK__XA_ST_OP),
(is_reply? "reply" : "request"), id,
((client == NULL)? "peer" : "client"),
((client == NULL)? remote_peer : pcmk__client_name(client)));
if (pcmk_is_set(call_options, st_opt_sync_call)) {
pcmk__assert((client == NULL) || (client->request_id == id));
}
if (is_reply) {
handle_reply(client, message, remote_peer);
} else {
pcmk__request_t request = {
.ipc_client = client,
.ipc_id = id,
.ipc_flags = flags,
.peer = remote_peer,
.xml = message,
.call_options = call_options,
.result = PCMK__UNKNOWN_RESULT,
};
request.op = crm_element_value_copy(request.xml, PCMK__XA_ST_OP);
CRM_CHECK(request.op != NULL, return);
if (pcmk_is_set(request.call_options, st_opt_sync_call)) {
pcmk__set_request_flags(&request, pcmk__request_sync);
}
handle_request(&request);
pcmk__reset_request(&request);
}
}
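/* Illustrative sketch only (not part of this change): routing in
 * stonith_command() hinges on whether the message contains a
 * PCMK__XE_ST_REPLY element. The message below is hypothetical.
 */
#if 0
xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY);

crm_xml_add(msg, PCMK__XA_ST_OP, STONITH_OP_QUERY);
// Contains a PCMK__XE_ST_REPLY element, so this is handled as a reply
// (handle_reply()); without one it would be handled as a request
// (handle_request()).
stonith_command(NULL, 0, 0, msg, "peer-node-1");
pcmk__xml_free(msg);
#endif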
diff --git a/include/crm/common/digest_internal.h b/include/crm/common/digest_internal.h
index 729b4e8cbc..45be1cf8b7 100644
--- a/include/crm/common/digest_internal.h
+++ b/include/crm/common/digest_internal.h
@@ -1,54 +1,54 @@
/*
* Copyright 2015-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__CRM_COMMON_DIGEST_INTERNAL__H
#define PCMK__CRM_COMMON_DIGEST_INTERNAL__H
/*
* Internal-only functions to create digest strings from XML
*/
#include <stdbool.h>
#include <libxml/tree.h> // xmlNode
#ifdef __cplusplus
extern "C" {
#endif
// Digest comparison results
enum pcmk__digest_result {
pcmk__digest_unknown, // No digest available for comparison
pcmk__digest_match, // Digests match
pcmk__digest_mismatch, // Any parameter changed (potentially reloadable)
pcmk__digest_restart, // Parameters that require a restart changed
};
// Information needed to compare operation digests
typedef struct {
enum pcmk__digest_result rc; // Result of digest comparison
xmlNode *params_all; // All operation parameters
xmlNode *params_secure; // Parameters marked private
xmlNode *params_restart; // Parameters marked not reloadable
char *digest_all_calc; // Digest of params_all
char *digest_secure_calc; // Digest of params_secure
char *digest_restart_calc; // Digest of params_restart
} pcmk__op_digest_t;
char *pcmk__digest_on_disk_cib(const xmlNode *input);
-char *pcmk__digest_operation(const xmlNode *input);
+char *pcmk__digest_op_params(const xmlNode *input);
char *pcmk__digest_xml(const xmlNode *input, bool filter);
bool pcmk__verify_digest(const xmlNode *input, const char *expected);
#ifdef __cplusplus
}
#endif
#endif // PCMK__CRM_COMMON_DIGEST_INTERNAL__H
diff --git a/lib/common/digest.c b/lib/common/digest.c
index ec7c8bdc10..3479f8a425 100644
--- a/lib/common/digest.c
+++ b/lib/common/digest.c
@@ -1,388 +1,393 @@
/*
* Copyright 2015-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <glib.h> // GString, etc.
#include <gnutls/crypto.h> // gnutls_hash_fast(), gnutls_hash_get_len()
#include <gnutls/gnutls.h> // gnutls_strerror()
#include <crm/crm.h>
#include <crm/common/xml.h>
#include "crmcommon_private.h"
#define BEST_EFFORT_STATUS 0
/*
* Pacemaker uses digests (MD5 hashes) of stringified XML to detect changes in
* the CIB as a whole, a particular resource's agent parameters, and the device
* parameters last used to unfence a particular node.
*
* "v2" digests hash pcmk__xml_string() directly, while less efficient "v1"
* digests do the same with a prefixed space, suffixed newline, and optional
* pre-sorting.
*
* On-disk CIB digests use v1 without sorting.
*
* Operation digests use v1 with sorting, and are stored in a resource's
* operation history in the CIB status section. They come in three flavors:
* - a digest of (nearly) all resource parameters and options, used to detect
* any resource configuration change;
* - a digest of resource parameters marked as nonreloadable, used to decide
* whether a reload or full restart is needed after a configuration change;
* - and a digest of resource parameters not marked as private, used in
* simulations where private parameters have been removed from the input.
*
* Unfencing digests are set as node attributes, and are used to require
* that nodes be unfenced again after a device's configuration changes.
*/
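/* Illustrative sketch only (not part of this change): the v1 framing described
 * above, as implemented by the helpers below, hashes the XML dump wrapped in a
 * leading space and a trailing newline.
 */
#if 0
// given some xmlNode *xml to digest:
GString *buf = g_string_new(" ");        // v1 compatibility: leading space
pcmk__xml_string(xml, 0, buf, 0);        // stringified XML
g_string_append_c(buf, '\n');            // v1 compatibility: trailing newline
char *v1_digest = crm_md5sum(buf->str);  // MD5 of the framed string

g_string_free(buf, TRUE);
free(v1_digest);
#endif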
/*!
* \internal
* \brief Dump XML in a format used with v1 digests
*
* \param[in] xml Root of XML to dump
*
* \return Newly allocated buffer containing dumped XML
*/
static GString *
dump_xml_for_digest(const xmlNode *xml)
{
GString *buffer = g_string_sized_new(1024);
/* for compatibility with the old result which is used for v1 digests */
g_string_append_c(buffer, ' ');
pcmk__xml_string(xml, 0, buffer, 0);
g_string_append_c(buffer, '\n');
return buffer;
}
/*!
* \internal
* \brief Calculate and return v1 digest of XML tree
*
* \param[in] input Root of XML to digest
*
* \return Newly allocated string containing digest
*
* \note Example return value: "c048eae664dba840e1d2060f00299e9d"
*/
static char *
calculate_xml_digest_v1(const xmlNode *input)
{
GString *buffer = dump_xml_for_digest(input);
char *digest = NULL;
// buffer->len > 2 for initial space and trailing newline
CRM_CHECK(buffer->len > 2,
g_string_free(buffer, TRUE);
return NULL);
digest = crm_md5sum((const char *) buffer->str);
crm_log_xml_trace(input, "digest:source");
g_string_free(buffer, TRUE);
return digest;
}
/*!
* \internal
* \brief Calculate and return the digest of a CIB, suitable for storing on disk
*
* \param[in] input Root of XML to digest
*
* \return Newly allocated string containing digest
*/
char *
pcmk__digest_on_disk_cib(const xmlNode *input)
{
/* Always use the v1 format for on-disk digests.
* * Switching to v2 affects even full-restart upgrades, so it would be a
* compatibility nightmare.
* * We only use this once at startup. All other invocations are in a
* separate child process.
*/
return calculate_xml_digest_v1(input);
}
/*!
* \internal
- * \brief Calculate and return digest of an operation XML element
+ * \brief Calculate and return digest of a \c PCMK_XE_PARAMETERS element
+ *
+ * This is intended for parameters of a resource operation (also known as
+ * resource action). A \c PCMK_XE_PARAMETERS element from a different source
+ * (for example, resource agent metadata) may have child elements, which are not
+ * allowed here.
*
* The digest is invariant to changes in the order of XML attributes.
*
- * \param[in] input Root of XML to digest (must have no children)
+ * \param[in] input XML element to digest (must have no children)
*
* \return Newly allocated string containing digest
*/
char *
-pcmk__digest_operation(const xmlNode *input)
+pcmk__digest_op_params(const xmlNode *input)
{
/* Switching to v2 digests would likely cause restarts during rolling
* upgrades.
*
* @TODO Confirm this. Switch to v2 if safe, or drop this TODO otherwise.
*/
char *digest = NULL;
xmlNode *sorted = NULL;
pcmk__assert(input->children == NULL);
sorted = pcmk__xe_create(NULL, (const char *) input->name);
pcmk__xe_copy_attrs(sorted, input, pcmk__xaf_none);
pcmk__xe_sort_attrs(sorted);
digest = calculate_xml_digest_v1(sorted);
pcmk__xml_free(sorted);
return digest;
}
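/* Illustrative sketch only (not part of this change): a minimal caller of
 * pcmk__digest_op_params(), assuming a flat PCMK_XE_PARAMETERS element whose
 * parameters are attributes only (names and values here are made up).
 */
#if 0
xmlNode *params = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);

crm_xml_add(params, "ip", "192.168.122.10");    // hypothetical parameter
crm_xml_add(params, "cidr_netmask", "24");      // hypothetical parameter

char *digest = pcmk__digest_op_params(params);  // order-independent digest

free(digest);
pcmk__xml_free(params);
#endif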
/*!
* \internal
* \brief Calculate and return the digest of an XML tree
*
* \param[in] xml XML tree to digest
* \param[in] filter Whether to filter certain XML attributes
*
* \return Newly allocated string containing digest
*/
char *
pcmk__digest_xml(const xmlNode *xml, bool filter)
{
/* @TODO Filtering accounts for significant CPU usage. Consider removing if
* possible.
*/
char *digest = NULL;
GString *buf = g_string_sized_new(1024);
pcmk__xml_string(xml, (filter? pcmk__xml_fmt_filtered : 0), buf, 0);
digest = crm_md5sum(buf->str);
pcmk__if_tracing(
{
char *trace_file = crm_strdup_printf("%s/digest-%s",
pcmk__get_tmpdir(), digest);
crm_trace("Saving %s.%s.%s to %s",
crm_element_value(xml, PCMK_XA_ADMIN_EPOCH),
crm_element_value(xml, PCMK_XA_EPOCH),
crm_element_value(xml, PCMK_XA_NUM_UPDATES),
trace_file);
save_xml_to_file(xml, "digest input", trace_file);
free(trace_file);
},
{}
);
g_string_free(buf, TRUE);
return digest;
}
/*!
* \internal
* \brief Check whether calculated digest of given XML matches expected digest
*
* \param[in] input Root of XML tree to digest
* \param[in] expected Expected digest in on-disk format
*
* \return true if digests match, false on mismatch or error
*/
bool
pcmk__verify_digest(const xmlNode *input, const char *expected)
{
char *calculated = NULL;
bool passed;
if (input != NULL) {
calculated = pcmk__digest_on_disk_cib(input);
if (calculated == NULL) {
crm_perror(LOG_ERR, "Could not calculate digest for comparison");
return false;
}
}
passed = pcmk__str_eq(expected, calculated, pcmk__str_casei);
if (passed) {
crm_trace("Digest comparison passed: %s", calculated);
} else {
crm_err("Digest comparison failed: expected %s, calculated %s",
expected, calculated);
}
free(calculated);
return passed;
}
/*!
* \internal
* \brief Check whether an XML attribute should be excluded from CIB digests
*
* \param[in] name XML attribute name
*
* \return true if XML attribute should be excluded from CIB digest calculation
*/
bool
pcmk__xa_filterable(const char *name)
{
static const char *filter[] = {
PCMK_XA_CRM_DEBUG_ORIGIN,
PCMK_XA_CIB_LAST_WRITTEN,
PCMK_XA_UPDATE_ORIGIN,
PCMK_XA_UPDATE_CLIENT,
PCMK_XA_UPDATE_USER,
};
for (int i = 0; i < PCMK__NELEM(filter); i++) {
if (strcmp(name, filter[i]) == 0) {
return true;
}
}
return false;
}
char *
crm_md5sum(const char *buffer)
{
char *digest = NULL;
gchar *raw_digest = NULL;
if (buffer == NULL) {
return NULL;
}
raw_digest = g_compute_checksum_for_string(G_CHECKSUM_MD5, buffer, -1);
if (raw_digest == NULL) {
crm_err("Failed to calculate hash");
return NULL;
}
digest = pcmk__str_copy(raw_digest);
g_free(raw_digest);
crm_trace("Digest %s.", digest);
return digest;
}
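/* Illustrative sketch only (not part of this change): crm_md5sum() returns a
 * newly allocated hex digest (or NULL on error) that the caller must free.
 */
#if 0
char *digest = crm_md5sum("some stringified XML");

if (digest != NULL) {
    crm_trace("Computed digest %s", digest);
    free(digest);
}
#endif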
// Return true if a is an attribute that should be filtered
static bool
should_filter_for_digest(xmlAttrPtr a, void *user_data)
{
if (strncmp((const char *) a->name, CRM_META "_",
sizeof(CRM_META " ") - 1) == 0) {
return true;
}
return pcmk__str_any_of((const char *) a->name,
PCMK_XA_ID,
PCMK_XA_CRM_FEATURE_SET,
PCMK__XA_OP_DIGEST,
PCMK__META_ON_NODE,
PCMK__META_ON_NODE_UUID,
"pcmk_external_ip",
NULL);
}
/*!
* \internal
* \brief Remove XML attributes not needed for operation digest
*
* \param[in,out] param_set XML with operation parameters
*/
void
pcmk__filter_op_for_digest(xmlNode *param_set)
{
char *key = NULL;
char *timeout = NULL;
guint interval_ms = 0;
if (param_set == NULL) {
return;
}
/* Timeout is useful for recurring operation digests, so grab it before
* removing meta-attributes
*/
key = crm_meta_name(PCMK_META_INTERVAL);
if (crm_element_value_ms(param_set, key, &interval_ms) != pcmk_ok) {
interval_ms = 0;
}
free(key);
key = NULL;
if (interval_ms != 0) {
key = crm_meta_name(PCMK_META_TIMEOUT);
timeout = crm_element_value_copy(param_set, key);
}
// Remove all CRM_meta_* attributes and certain other attributes
pcmk__xe_remove_matching_attrs(param_set, false, should_filter_for_digest,
NULL);
// Add timeout back for recurring operation digests
if (timeout != NULL) {
crm_xml_add(param_set, key, timeout);
}
free(timeout);
free(key);
}
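/* Illustrative sketch only (not part of this change): meta-attributes are
 * stripped before digesting, but the timeout is re-added for recurring
 * operations so that it stays part of their digests. Values are made up.
 */
#if 0
xmlNode *params = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
char *interval_key = crm_meta_name(PCMK_META_INTERVAL);
char *timeout_key = crm_meta_name(PCMK_META_TIMEOUT);

crm_xml_add(params, "ip", "192.168.122.10");  // hypothetical parameter
crm_xml_add(params, interval_key, "10000");   // recurring: 10-second interval
crm_xml_add(params, timeout_key, "20000");    // 20-second timeout

pcmk__filter_op_for_digest(params);
// CRM_meta_* attributes are now gone, except the timeout, which was re-added
// because the operation is recurring

free(interval_key);
free(timeout_key);
pcmk__xml_free(params);
#endif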
// Deprecated functions kept only for backward API compatibility
// LCOV_EXCL_START
#include <crm/common/xml_compat.h>
#include <crm/common/xml_element_compat.h>
char *
calculate_on_disk_digest(xmlNode *input)
{
return calculate_xml_digest_v1(input);
}
char *
calculate_operation_digest(xmlNode *input, const char *version)
{
xmlNode *sorted = sorted_xml(input, NULL, true);
char *digest = calculate_xml_digest_v1(sorted);
pcmk__xml_free(sorted);
return digest;
}
char *
calculate_xml_versioned_digest(xmlNode *input, gboolean sort,
gboolean do_filter, const char *version)
{
if ((version == NULL) || (compare_version("3.0.5", version) > 0)) {
xmlNode *sorted = NULL;
char *digest = NULL;
if (sort) {
sorted = sorted_xml(input, NULL, true);
input = sorted;
}
crm_trace("Using v1 digest algorithm for %s",
pcmk__s(version, "unknown feature set"));
digest = calculate_xml_digest_v1(input);
pcmk__xml_free(sorted);
return digest;
}
crm_trace("Using v2 digest algorithm for %s", version);
return pcmk__digest_xml(input, do_filter);
}
// LCOV_EXCL_STOP
// End deprecated API
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index c7b7c62fbc..8a45c15f99 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -1,1959 +1,1959 @@
/*
* Copyright 2004-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdbool.h> // bool, true, false
#include <stdio.h>
#include <sys/param.h>
#include <glib.h>
#include <libxml/tree.h> // xmlNode
#include <crm/lrmd_internal.h>
#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
/*!
* \internal
* \brief Get the action flags relevant to ordering constraints
*
* \param[in,out] action Action to check
* \param[in] node Node that *other* action in the ordering is on
* (used only for clone resource actions)
*
* \return Action flags that should be used for orderings
*/
static uint32_t
action_flags_for_ordering(pcmk_action_t *action, const pcmk_node_t *node)
{
bool runnable = false;
uint32_t flags;
// For non-resource actions, return the action flags
if (action->rsc == NULL) {
return action->flags;
}
/* For non-clone resources, or a clone action not assigned to a node,
* return the flags as determined by the resource method without a node
* specified.
*/
flags = action->rsc->priv->cmds->action_flags(action, NULL);
if ((node == NULL) || !pcmk__is_clone(action->rsc)) {
return flags;
}
/* Otherwise (i.e., for clone resource actions on a specific node), first
* remember whether the non-node-specific action is runnable.
*/
runnable = pcmk_is_set(flags, pcmk__action_runnable);
// Then recheck the resource method with the node
flags = action->rsc->priv->cmds->action_flags(action, node);
/* For clones in ordering constraints, the node-specific "runnable" doesn't
* matter, just the non-node-specific setting (i.e., is the action runnable
* anywhere).
*
* This applies only to runnable, and only for ordering constraints. This
* function shouldn't be used for other types of constraints without
* changes. Not very satisfying, but it's logical and appears to work well.
*/
if (runnable && !pcmk_is_set(flags, pcmk__action_runnable)) {
pcmk__set_raw_action_flags(flags, action->rsc->id,
pcmk__action_runnable);
}
return flags;
}
/*!
* \internal
* \brief Get action UUID that should be used with a resource ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the UUID and resource of the first action in an
* ordering, this returns the UUID of the action that should actually be used
* for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0").
*
* \param[in] first_uuid UUID of first action in ordering
* \param[in] first_rsc Resource of first action in ordering
*
* \return Newly allocated copy of UUID to use with ordering
* \note It is the caller's responsibility to free the return value.
*/
static char *
action_uuid_for_ordering(const char *first_uuid,
const pcmk_resource_t *first_rsc)
{
guint interval_ms = 0;
char *uuid = NULL;
char *rid = NULL;
char *first_task_str = NULL;
enum pcmk__action_type first_task = pcmk__action_unspecified;
enum pcmk__action_type remapped_task = pcmk__action_unspecified;
// Only non-notify actions for collective resources need remapping
if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
|| (first_rsc->priv->variant < pcmk__rsc_variant_group)) {
goto done;
}
// Only non-recurring actions need remapping
pcmk__assert(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms));
if (interval_ms > 0) {
goto done;
}
first_task = pcmk__parse_action(first_task_str);
switch (first_task) {
case pcmk__action_stop:
case pcmk__action_start:
case pcmk__action_notify:
case pcmk__action_promote:
case pcmk__action_demote:
remapped_task = first_task + 1;
break;
case pcmk__action_stopped:
case pcmk__action_started:
case pcmk__action_notified:
case pcmk__action_promoted:
case pcmk__action_demoted:
remapped_task = first_task;
break;
case pcmk__action_monitor:
case pcmk__action_shutdown:
case pcmk__action_fence:
break;
default:
crm_err("Unknown action '%s' in ordering", first_task_str);
break;
}
if (remapped_task != pcmk__action_unspecified) {
/* If a clone or bundle has notifications enabled, the ordering will be
* relative to when notifications have been sent for the remapped task.
*/
if (pcmk_is_set(first_rsc->flags, pcmk__rsc_notify)
&& (pcmk__is_clone(first_rsc) || pcmk__is_bundled(first_rsc))) {
uuid = pcmk__notify_key(rid, "confirmed-post",
pcmk__action_text(remapped_task));
} else {
uuid = pcmk__op_key(rid, pcmk__action_text(remapped_task), 0);
}
pcmk__rsc_trace(first_rsc,
"Remapped action UUID %s to %s for ordering purposes",
first_uuid, uuid);
}
done:
free(first_task_str);
free(rid);
return (uuid != NULL)? uuid : pcmk__str_copy(first_uuid);
}
/*!
* \internal
* \brief Get actual action that should be used with an ordering
*
* When an action is ordered relative to an action for a collective resource
* (clone, group, or bundle), it actually needs to be ordered after all
* instances of the collective have completed the relevant action (for example,
* given "start CLONE then start RSC", RSC must wait until all instances of
* CLONE have started). Given the first action in an ordering, this returns
* the action that should actually be used for ordering (for example, the
* started action instead of the start action).
*
* \param[in] action First action in an ordering
*
* \return Actual action that should be used for the ordering
*/
static pcmk_action_t *
action_for_ordering(pcmk_action_t *action)
{
pcmk_action_t *result = action;
pcmk_resource_t *rsc = action->rsc;
if (rsc == NULL) {
return result;
}
if ((rsc->priv->variant >= pcmk__rsc_variant_group)
&& (action->uuid != NULL)) {
char *uuid = action_uuid_for_ordering(action->uuid, rsc);
result = find_first_action(rsc->priv->actions, uuid, NULL, NULL);
if (result == NULL) {
crm_warn("Not remapping %s to %s because %s does not have "
"remapped action", action->uuid, uuid, rsc->id);
result = action;
}
free(uuid);
}
return result;
}
/*!
* \internal
* \brief Wrapper for update_ordered_actions() method for readability
*
* \param[in,out] rsc Resource to call method for
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this
* node (only used when interleaving instances)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates
* (may include pcmk__action_optional to affect only
* mandatory actions, and pcmk__action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static inline uint32_t
update(pcmk_resource_t *rsc, pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
return rsc->priv->cmds->update_ordered_actions(first, then, node, flags,
filter, type, scheduler);
}
/*!
* \internal
* \brief Update flags for ordering's actions appropriately for ordering's flags
*
* \param[in,out] first First action in an ordering
* \param[in,out] then Then action in an ordering
* \param[in] first_flags Action flags for \p first for ordering purposes
* \param[in] then_flags Action flags for \p then for ordering purposes
* \param[in,out] order Action wrapper for \p first in ordering
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags
*/
static uint32_t
update_action_for_ordering_flags(pcmk_action_t *first, pcmk_action_t *then,
uint32_t first_flags, uint32_t then_flags,
pcmk__related_action_t *order,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
/* The node will only be used for clones. If interleaved, node will be NULL,
* otherwise the ordering scope will be limited to the node. Normally, the
* whole 'then' clone should restart if 'first' is restarted, so then->node
* is needed.
*/
pcmk_node_t *node = then->node;
if (pcmk_is_set(order->flags, pcmk__ar_first_implies_same_node_then)) {
/* For unfencing, only instances of 'then' on the same node as 'first'
* (the unfencing operation) should restart, so reset node to
* first->node, at which point this case is handled like a normal
* pcmk__ar_first_implies_then.
*/
pcmk__clear_relation_flags(order->flags,
pcmk__ar_first_implies_same_node_then);
pcmk__set_relation_flags(order->flags, pcmk__ar_first_implies_then);
node = first->node;
pcmk__rsc_trace(then->rsc,
"%s then %s: mapped "
"pcmk__ar_first_implies_same_node_then to "
"pcmk__ar_first_implies_then on %s",
first->uuid, then->uuid, pcmk__node_name(node));
}
if (pcmk_is_set(order->flags, pcmk__ar_first_implies_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk__action_optional,
pcmk__action_optional,
pcmk__ar_first_implies_then, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk__action_optional)
&& pcmk_is_set(then->flags, pcmk__action_optional)) {
pcmk__clear_action_flags(then, pcmk__action_optional);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_first_implies_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_intermediate_stop)
&& (then->rsc != NULL)) {
enum pcmk__action_flags restart = pcmk__action_optional
|pcmk__action_runnable;
changed |= update(then->rsc, first, then, node, first_flags, restart,
pcmk__ar_intermediate_stop, scheduler);
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_intermediate_stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_then_implies_first)) {
if (first->rsc != NULL) {
changed |= update(first->rsc, first, then, node, first_flags,
pcmk__action_optional,
pcmk__ar_then_implies_first, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk__action_optional)
&& pcmk_is_set(first->flags, pcmk__action_runnable)) {
pcmk__clear_action_flags(first, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_promoted_then_implies_first)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node,
first_flags & pcmk__action_optional,
pcmk__action_optional,
pcmk__ar_promoted_then_implies_first, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after "
"pcmk__ar_promoted_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_min_runnable)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable, pcmk__ar_min_runnable,
scheduler);
} else if (pcmk_is_set(first_flags, pcmk__action_runnable)) {
// We have another runnable instance of "first"
then->runnable_before++;
/* Mark "then" as runnable if it requires a certain number of
* "before" instances to be runnable, and they now are.
*/
if ((then->runnable_before >= then->required_runnable_before)
&& !pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__set_action_flags(then, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_min_runnable",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_nested_remote_probe)
&& (then->rsc != NULL)) {
if (!pcmk_is_set(first_flags, pcmk__action_runnable)
&& (first->rsc != NULL)
&& (first->rsc->priv->active_nodes != NULL)) {
pcmk__rsc_trace(then->rsc,
"%s then %s: ignoring because first is stopping",
first->uuid, then->uuid);
order->flags = pcmk__ar_none;
} else {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_nested_remote_probe",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_unrunnable_first_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable,
pcmk__ar_unrunnable_first_blocks, scheduler);
} else if (!pcmk_is_set(first_flags, pcmk__action_runnable)
&& pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__clear_action_flags(then, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_unrunnable_first_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_unmigratable_then_blocks)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_optional,
pcmk__ar_unmigratable_then_blocks, scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after "
"pcmk__ar_unmigratable_then_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_first_else_then)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_optional, pcmk__ar_first_else_then,
scheduler);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after pcmk__ar_first_else_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_ordered)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable, pcmk__ar_ordered,
scheduler);
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_ordered",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(order->flags, pcmk__ar_asymmetric)) {
if (then->rsc != NULL) {
changed |= update(then->rsc, first, then, node, first_flags,
pcmk__action_runnable, pcmk__ar_asymmetric,
scheduler);
}
pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_asymmetric",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
if (pcmk_is_set(first->flags, pcmk__action_runnable)
&& pcmk_is_set(order->flags, pcmk__ar_first_implies_then_graphed)
&& !pcmk_is_set(first_flags, pcmk__action_optional)) {
pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
then->uuid, first->uuid);
pcmk__set_action_flags(then, pcmk__action_always_in_graph);
// Don't bother marking 'then' as changed just for this
}
if (pcmk_is_set(order->flags, pcmk__ar_then_implies_first_graphed)
&& !pcmk_is_set(then_flags, pcmk__action_optional)) {
pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
first->uuid, then->uuid);
pcmk__set_action_flags(first, pcmk__action_always_in_graph);
// Don't bother marking 'first' as changed just for this
}
if (pcmk_any_flags_set(order->flags, pcmk__ar_first_implies_then
|pcmk__ar_then_implies_first
|pcmk__ar_intermediate_stop)
&& (first->rsc != NULL)
&& !pcmk_is_set(first->rsc->flags, pcmk__rsc_managed)
&& pcmk_is_set(first->rsc->flags, pcmk__rsc_blocked)
&& !pcmk_is_set(first->flags, pcmk__action_runnable)
&& pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* @TODO This seems odd; why wouldn't an unrunnable "first" already
* block "then" before this? Note that the unmanaged-stop-{1,2}
* scheduler regression tests and the test CIB for T209 have tests for
* "stop then stop" relations that would be good for checking any
* changes.
*/
if (pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__clear_action_flags(then, pcmk__action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pcmk__rsc_trace(then->rsc,
"%s then %s: %s after checking whether first "
"is blocked, unmanaged, unrunnable stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
return changed;
}
// Convenience macros for logging action properties
#define action_type_str(flags) \
(pcmk_is_set((flags), pcmk__action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
(pcmk_is_set((flags), pcmk__action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
(pcmk_is_set((flags), pcmk__action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->priv->name)
/*!
* \internal
* \brief Update an action's flags for all orderings where it is "then"
*
* \param[in,out] then Action to update
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__update_action_for_orderings(pcmk_action_t *then,
pcmk_scheduler_t *scheduler)
{
GList *lpc = NULL;
uint32_t changed = pcmk__updated_none;
int last_flags = then->flags;
pcmk__rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s",
action_type_str(then->flags), then->uuid,
action_optional_str(then->flags),
action_runnable_str(then->flags), action_node_str(then));
if (then->required_runnable_before > 0) {
/* Initialize current known "runnable before" actions. As
* update_action_for_ordering_flags() is called for each of then's
* before actions, this number will increment as runnable 'first'
* actions are encountered.
*/
then->runnable_before = 0;
/* The pcmk__ar_min_runnable clause of
* update_action_for_ordering_flags() (called below)
* will reset runnable if appropriate.
*/
pcmk__clear_action_flags(then, pcmk__action_runnable);
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *other = lpc->data;
pcmk_action_t *first = other->action;
pcmk_node_t *then_node = then->node;
pcmk_node_t *first_node = first->node;
const uint32_t target = pcmk__rsc_node_assigned;
if ((first->rsc != NULL)
&& pcmk__is_group(first->rsc)
&& pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
first_node = first->rsc->priv->fns->location(first->rsc, NULL,
target);
if (first_node != NULL) {
pcmk__rsc_trace(first->rsc, "Found %s for 'first' %s",
pcmk__node_name(first_node), first->uuid);
}
}
if (pcmk__is_group(then->rsc)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
then_node = then->rsc->priv->fns->location(then->rsc, NULL, target);
if (then_node != NULL) {
pcmk__rsc_trace(then->rsc, "Found %s for 'then' %s",
pcmk__node_name(then_node), then->uuid);
}
}
// Disable constraint if it only applies when on same node, but isn't
if (pcmk_is_set(other->flags, pcmk__ar_if_on_same_node)
&& (first_node != NULL) && (then_node != NULL)
&& !pcmk__same_node(first_node, then_node)) {
pcmk__rsc_trace(then->rsc,
"Disabled ordering %s on %s then %s on %s: "
"not same node",
other->action->uuid, pcmk__node_name(first_node),
then->uuid, pcmk__node_name(then_node));
other->flags = pcmk__ar_none;
continue;
}
pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
if ((first->rsc != NULL)
&& pcmk_is_set(other->flags, pcmk__ar_then_cancels_first)
&& !pcmk_is_set(then->flags, pcmk__action_optional)) {
/* 'then' is required, so we must abandon 'first'
* (e.g. a required stop cancels any agent reload).
*/
pcmk__set_action_flags(other->action, pcmk__action_optional);
if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
pcmk__clear_rsc_flags(first->rsc, pcmk__rsc_reload);
}
}
if ((first->rsc != NULL) && (then->rsc != NULL)
&& (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) {
first = action_for_ordering(first);
}
if (first != other->action) {
pcmk__rsc_trace(then->rsc, "Ordering %s after %s instead of %s",
then->uuid, first->uuid, other->action->uuid);
}
pcmk__rsc_trace(then->rsc,
"%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s",
first->uuid, first->flags, then->uuid, then->flags,
other->flags, action_node_str(first));
if (first == other->action) {
/* 'first' was not remapped (e.g. from 'start' to 'running'), which
* could mean it is a non-resource action, a primitive resource
* action, or already expanded.
*/
uint32_t first_flags, then_flags;
first_flags = action_flags_for_ordering(first, then_node);
then_flags = action_flags_for_ordering(then, first_node);
changed |= update_action_for_ordering_flags(first, then,
first_flags, then_flags,
other, scheduler);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
*/
} else if (order_actions(first, then, other->flags)) {
/* This was the first time 'first' and 'then' were associated,
* start again to get the new actions_before list
*/
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
pcmk__rsc_trace(then->rsc,
"Disabled ordering %s then %s in favor of %s "
"then %s",
other->action->uuid, then->uuid, first->uuid,
then->uuid);
other->flags = pcmk__ar_none;
}
if (pcmk_is_set(changed, pcmk__updated_first)) {
crm_trace("Re-processing %s and its 'after' actions "
"because it changed", first->uuid);
for (GList *lpc2 = first->actions_after; lpc2 != NULL;
lpc2 = lpc2->next) {
pcmk__related_action_t *other = lpc2->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
pcmk__update_action_for_orderings(first, scheduler);
}
}
if (then->required_runnable_before > 0) {
if (last_flags == then->flags) {
pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
} else {
pcmk__set_updated_flags(changed, then, pcmk__updated_then);
}
}
if (pcmk_is_set(changed, pcmk__updated_then)) {
crm_trace("Re-processing %s and its 'after' actions because it changed",
then->uuid);
if (pcmk_is_set(last_flags, pcmk__action_runnable)
&& !pcmk_is_set(then->flags, pcmk__action_runnable)) {
pcmk__block_colocation_dependents(then);
}
pcmk__update_action_for_orderings(then, scheduler);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
pcmk__related_action_t *other = lpc->data;
pcmk__update_action_for_orderings(other->action, scheduler);
}
}
}
static inline bool
is_primitive_action(const pcmk_action_t *action)
{
return (action != NULL) && pcmk__is_primitive(action->rsc);
}
/*!
* \internal
* \brief Clear a single action flag and set reason text
*
* \param[in,out] action Action whose flag should be cleared
* \param[in] flag Action flag that should be cleared
* \param[in] reason Action that is the reason why flag is being cleared
*/
#define clear_action_flag_because(action, flag, reason) do { \
if (pcmk_is_set((action)->flags, (flag))) { \
pcmk__clear_action_flags(action, flag); \
if ((action)->rsc != (reason)->rsc) { \
char *reason_text = pe__action2reason((reason), (flag)); \
pe_action_set_reason((action), reason_text, false); \
free(reason_text); \
} \
} \
} while (0)
/*!
* \internal
* \brief Update actions in an asymmetric ordering
*
* If the "first" action in an asymmetric ordering is unrunnable, make the
* "second" action unrunnable as well, if appropriate.
*
* \param[in] first 'First' action in an asymmetric ordering
* \param[in,out] then 'Then' action in an asymmetric ordering
*/
static void
handle_asymmetric_ordering(const pcmk_action_t *first, pcmk_action_t *then)
{
/* Only resource actions after an unrunnable 'first' action need updates for
* asymmetric ordering.
*/
if ((then->rsc == NULL)
|| pcmk_is_set(first->flags, pcmk__action_runnable)) {
return;
}
// Certain optional 'then' actions are unaffected by unrunnable 'first'
if (pcmk_is_set(then->flags, pcmk__action_optional)) {
enum rsc_role_e then_rsc_role;
then_rsc_role = then->rsc->priv->fns->state(then->rsc, true);
if ((then_rsc_role == pcmk_role_stopped)
&& pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* If 'then' should stop after 'first' but is already stopped, the
* ordering is irrelevant.
*/
return;
} else if ((then_rsc_role >= pcmk_role_started)
&& pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
&& pe__rsc_running_on_only(then->rsc, then->node)) {
/* Similarly if 'then' should start after 'first' but is already
* started on a single node.
*/
return;
}
}
// 'First' can't run, so 'then' can't either
clear_action_flag_because(then, pcmk__action_optional, first);
clear_action_flag_because(then, pcmk__action_runnable, first);
}
/*!
* \internal
* \brief Set action bits appropriately when pcmk__ar_intermediate_stop is used
*
* \param[in,out] first 'First' action in ordering
* \param[in,out] then 'Then' action in ordering
* \param[in] filter What action flags to care about
*
* \note pcmk__ar_intermediate_stop is set for "stop resource before starting
* it" and "stop later group member before stopping earlier group member"
*/
static void
handle_restart_ordering(pcmk_action_t *first, pcmk_action_t *then,
uint32_t filter)
{
const char *reason = NULL;
pcmk__assert(is_primitive_action(first) && is_primitive_action(then));
// We need to update the action in two cases:
// ... if 'then' is required
if (pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(then->flags, pcmk__action_optional)) {
reason = "restart";
}
/* ... if 'then' is an unrunnable action on the same resource (if a resource
* should restart but can't start, we still want to stop)
*/
if (pcmk_is_set(filter, pcmk__action_runnable)
&& !pcmk_is_set(then->flags, pcmk__action_runnable)
&& pcmk_is_set(then->rsc->flags, pcmk__rsc_managed)
&& (first->rsc == then->rsc)) {
reason = "stop";
}
if (reason == NULL) {
return;
}
pcmk__rsc_trace(first->rsc, "Handling %s -> %s for %s",
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
if (pcmk_is_set(first->flags, pcmk__action_runnable)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
// Make 'first' required if 'then' is required
if (!pcmk_is_set(then->flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
// Make 'first' unmigratable if 'then' is unmigratable
if (!pcmk_is_set(then->flags, pcmk__action_migratable)) {
clear_action_flag_because(first, pcmk__action_migratable, then);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
if (!pcmk_is_set(first->flags, pcmk__action_optional)
&& !pcmk_is_set(first->flags, pcmk__action_runnable)) {
clear_action_flag_because(then, pcmk__action_runnable, first);
}
}
/*!
* \internal
* \brief Update two actions according to an ordering between them
*
* Given information about an ordering of two actions, update the actions' flags
* (and runnable_before members where relevant) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
* \param[in,out] first 'First' action in an ordering
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* (ignored)
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
* include pcmk__action_optional to affect only
* mandatory actions, and pcmk__action_runnable to
* affect only runnable actions)
* \param[in] type Group of enum pcmk__action_relation_flags to apply
* \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
uint32_t then_flags = 0U;
uint32_t first_flags = 0U;
pcmk__assert((first != NULL) && (then != NULL) && (scheduler != NULL));
then_flags = then->flags;
first_flags = first->flags;
if (pcmk_is_set(type, pcmk__ar_asymmetric)) {
handle_asymmetric_ordering(first, then);
}
if (pcmk_is_set(type, pcmk__ar_then_implies_first)
&& !pcmk_is_set(then_flags, pcmk__action_optional)) {
// Then is required, and implies first should be, too
if (pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(flags, pcmk__action_optional)
&& pcmk_is_set(first_flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
if (pcmk_is_set(flags, pcmk__action_migratable)
&& !pcmk_is_set(then->flags, pcmk__action_migratable)) {
clear_action_flag_because(first, pcmk__action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_promoted_then_implies_first)
&& (then->rsc != NULL)
&& (then->rsc->priv->orig_role == pcmk_role_promoted)
&& pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(then->flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
if (pcmk_is_set(first->flags, pcmk__action_migratable)
&& !pcmk_is_set(then->flags, pcmk__action_migratable)) {
clear_action_flag_because(first, pcmk__action_migratable, then);
}
}
if (pcmk_is_set(type, pcmk__ar_unmigratable_then_blocks)
&& pcmk_is_set(filter, pcmk__action_optional)) {
if (!pcmk_all_flags_set(then->flags, pcmk__action_migratable
|pcmk__action_runnable)) {
clear_action_flag_because(first, pcmk__action_runnable, then);
}
if (!pcmk_is_set(then->flags, pcmk__action_optional)) {
clear_action_flag_because(first, pcmk__action_optional, then);
}
}
if (pcmk_is_set(type, pcmk__ar_first_else_then)
&& pcmk_is_set(filter, pcmk__action_optional)
&& !pcmk_is_set(first->flags, pcmk__action_runnable)) {
clear_action_flag_because(then, pcmk__action_migratable, first);
pcmk__clear_action_flags(then, pcmk__action_pseudo);
}
if (pcmk_is_set(type, pcmk__ar_unrunnable_first_blocks)
&& pcmk_is_set(filter, pcmk__action_runnable)
&& pcmk_is_set(then->flags, pcmk__action_runnable)
&& !pcmk_is_set(flags, pcmk__action_runnable)) {
clear_action_flag_because(then, pcmk__action_runnable, first);
clear_action_flag_because(then, pcmk__action_migratable, first);
}
if (pcmk_is_set(type, pcmk__ar_first_implies_then)
&& pcmk_is_set(filter, pcmk__action_optional)
&& pcmk_is_set(then->flags, pcmk__action_optional)
&& !pcmk_is_set(flags, pcmk__action_optional)
&& !pcmk_is_set(first->flags, pcmk__action_migratable)) {
clear_action_flag_because(then, pcmk__action_optional, first);
}
if (pcmk_is_set(type, pcmk__ar_intermediate_stop)) {
handle_restart_ordering(first, then, filter);
}
if (then_flags != then->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
pcmk__rsc_trace(then->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'first' %s (%#.6x)",
then->uuid, pcmk__node_name(then->node),
then->flags, then_flags, first->uuid, first->flags);
if ((then->rsc != NULL) && (then->rsc->priv->parent != NULL)) {
// Required to handle "X_stop then X_start" for cloned groups
pcmk__update_action_for_orderings(then, scheduler);
}
}
if (first_flags != first->flags) {
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
pcmk__rsc_trace(first->rsc,
"%s on %s: flags are now %#.6x (was %#.6x) "
"because of 'then' %s (%#.6x)",
first->uuid, pcmk__node_name(first->node),
first->flags, first_flags, then->uuid, then->flags);
}
return changed;
}
/*!
* \internal
* \brief Trace-log an action (optionally with its dependent actions)
*
* \param[in] pre_text If not NULL, prefix the log with this plus ": "
* \param[in] action Action to log
* \param[in] details If true, recursively log dependent actions
*/
void
pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
bool details)
{
const char *node_uname = NULL;
const char *node_uuid = NULL;
const char *desc = NULL;
CRM_CHECK(action != NULL, return);
if (!pcmk_is_set(action->flags, pcmk__action_pseudo)) {
if (action->node != NULL) {
node_uname = action->node->priv->name;
node_uuid = action->node->priv->id;
} else {
node_uname = "<none>";
}
}
switch (pcmk__parse_action(action->task)) {
case pcmk__action_fence:
case pcmk__action_shutdown:
if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
desc = "Pseudo ";
} else if (pcmk_is_set(action->flags, pcmk__action_optional)) {
desc = "Optional ";
} else if (!pcmk_is_set(action->flags, pcmk__action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
default:
if (pcmk_is_set(action->flags, pcmk__action_optional)) {
desc = "Optional ";
} else if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
desc = "Pseudo ";
} else if (!pcmk_is_set(action->flags, pcmk__action_runnable)) {
desc = "!!Non-Startable!! ";
} else {
desc = "(Provisional) ";
}
crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
((pre_text == NULL)? "" : pre_text),
((pre_text == NULL)? "" : ": "),
desc, action->id, action->uuid,
(action->rsc? action->rsc->id : "<none>"),
(node_uname? "\ton " : ""), (node_uname? node_uname : ""),
(node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
(node_uuid? ")" : ""));
break;
}
if (details) {
const GList *iter = NULL;
const pcmk__related_action_t *other = NULL;
crm_trace("\t\t====== Preceding Actions");
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== Subsequent Actions");
for (iter = action->actions_after; iter != NULL; iter = iter->next) {
other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== End");
} else {
crm_trace("\t\t(before=%d, after=%d)",
g_list_length(action->actions_before),
g_list_length(action->actions_after));
}
}
/*!
* \internal
* \brief Create a new shutdown action for a node
*
* \param[in,out] node Node being shut down
*
* \return Newly created shutdown action for \p node
*/
pcmk_action_t *
pcmk__new_shutdown_action(pcmk_node_t *node)
{
char *shutdown_id = NULL;
pcmk_action_t *shutdown_op = NULL;
pcmk__assert(node != NULL);
shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
node->priv->name);
shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
node, FALSE, node->priv->scheduler);
pcmk__order_stops_before_shutdown(node, shutdown_op);
pcmk__insert_meta(shutdown_op, PCMK__META_OP_NO_WAIT, PCMK_VALUE_TRUE);
return shutdown_op;
}
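/* Illustrative example (hypothetical node name): for a node called "node1",
 * the key built above would be "do_shutdown-node1", assuming
 * PCMK_ACTION_DO_SHUTDOWN expands to the literal action name "do_shutdown".
 */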
/*!
* \internal
* \brief Calculate and add an operation digest to XML
*
 * Calculate an operation digest, which enables us to later determine when a
 * restart is needed due to a change in the resource's parameters, and add it
 * to the given XML.
*
* \param[in] op Operation result from executor
* \param[in,out] update XML to add digest to
*/
static void
add_op_digest_to_xml(const lrmd_event_data_t *op, xmlNode *update)
{
char *digest = NULL;
xmlNode *args_xml = NULL;
if (op->params == NULL) {
return;
}
args_xml = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
g_hash_table_foreach(op->params, hash2field, args_xml);
pcmk__filter_op_for_digest(args_xml);
- digest = pcmk__digest_operation(args_xml);
+ digest = pcmk__digest_op_params(args_xml);
crm_xml_add(update, PCMK__XA_OP_DIGEST, digest);
pcmk__xml_free(args_xml);
free(digest);
}
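/* Illustrative sketch (hypothetical values): after the call above, the history
 * entry gains an attribute along the lines of
 *
 *   <lrm_rsc_op ... op-digest="873ed4f07792aa8ff18f3254244675ea" ... />
 *
 * where the value is a hash of the filtered parameter list, assuming
 * PCMK__XA_OP_DIGEST expands to "op-digest".
 */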
#define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
/*!
* \internal
* \brief Create XML for resource operation history update
*
* \param[in,out] parent Parent XML node to add to
* \param[in,out] op Operation event data
* \param[in] caller_version DC feature set
* \param[in] target_rc Expected result of operation
* \param[in] node Name of node on which operation was performed
* \param[in] origin Arbitrary description of update source
*
* \return Newly created XML node for history update
*/
xmlNode *
pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
const char *caller_version, int target_rc,
const char *node, const char *origin)
{
char *key = NULL;
char *magic = NULL;
char *op_id = NULL;
char *op_id_additional = NULL;
char *local_user_data = NULL;
const char *exit_reason = NULL;
xmlNode *xml_op = NULL;
const char *task = NULL;
CRM_CHECK(op != NULL, return NULL);
crm_trace("Creating history XML for %s-interval %s action for %s on %s "
"(DC version: %s, origin: %s)",
pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
((node == NULL)? "no node" : node), caller_version, origin);
task = op->op_type;
/* Record a successful agent reload as a start, and a failed one as a
* monitor, to make life easier for the scheduler when determining the
* current state.
*
* @COMPAT We should check "reload" here only if the operation was for a
* pre-OCF-1.1 resource agent, but we don't know that here, and we should
* only ever get results for actions scheduled by us, so we can reasonably
* assume any "reload" is actually a pre-1.1 agent reload.
*
* @TODO This remapping can make log messages with task confusing for users
* (for example, an "Initiating reload ..." followed by "... start ...
* confirmed"). Either do this remapping in the scheduler if possible, or
* store the original task in a new XML attribute for later logging.
*/
if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
NULL)) {
if (op->op_status == PCMK_EXEC_DONE) {
task = PCMK_ACTION_START;
} else {
task = PCMK_ACTION_MONITOR;
}
}
key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = crm_meta_value(op->params, "notify_type");
const char *n_task = crm_meta_value(op->params, "notify_operation");
CRM_LOG_ASSERT(n_type != NULL);
CRM_LOG_ASSERT(n_task != NULL);
op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
if (op->op_status != PCMK_EXEC_PENDING) {
/* Ignore notify errors.
*
* @TODO It might be better to keep the correct result here, and
* ignore it in process_graph_event().
*/
lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
}
/* Migration history is preserved separately, which usually matters for
* multiple nodes and is important for future cluster transitions.
*/
} else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
op_id = strdup(key);
} else if (did_rsc_op_fail(op, target_rc)) {
op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
if (op->interval_ms == 0) {
/* Ensure 'last' gets updated, in case PCMK_META_RECORD_PENDING is
* true
*/
op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
}
exit_reason = op->exit_reason;
} else if (op->interval_ms > 0) {
op_id = strdup(key);
} else {
op_id = pcmk__op_key(op->rsc_id, "last", 0);
}
again:
xml_op = pcmk__xe_first_child(parent, PCMK__XE_LRM_RSC_OP, PCMK_XA_ID,
op_id);
if (xml_op == NULL) {
xml_op = pcmk__xe_create(parent, PCMK__XE_LRM_RSC_OP);
}
if (op->user_data == NULL) {
crm_debug("Generating fake transition key for: " PCMK__OP_FMT
" %d from %s", op->rsc_id, op->op_type, op->interval_ms,
op->call_id, origin);
local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
FAKE_TE_ID);
op->user_data = local_user_data;
}
if (magic == NULL) {
magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc,
(const char *) op->user_data);
}
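/* For illustration only (hypothetical values): a successful result with
 * op_status 0 and rc 0, combined with a transition key of
 * "3:11:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", would yield the magic string
 * "0:0;3:11:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".
 */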
crm_xml_add(xml_op, PCMK_XA_ID, op_id);
crm_xml_add(xml_op, PCMK__XA_OPERATION_KEY, key);
crm_xml_add(xml_op, PCMK_XA_OPERATION, task);
crm_xml_add(xml_op, PCMK_XA_CRM_DEBUG_ORIGIN, origin);
crm_xml_add(xml_op, PCMK_XA_CRM_FEATURE_SET, caller_version);
crm_xml_add(xml_op, PCMK__XA_TRANSITION_KEY, op->user_data);
crm_xml_add(xml_op, PCMK__XA_TRANSITION_MAGIC, magic);
crm_xml_add(xml_op, PCMK_XA_EXIT_REASON, pcmk__s(exit_reason, ""));
crm_xml_add(xml_op, PCMK__META_ON_NODE, node); // For context during triage
crm_xml_add_int(xml_op, PCMK__XA_CALL_ID, op->call_id);
crm_xml_add_int(xml_op, PCMK__XA_RC_CODE, op->rc);
crm_xml_add_int(xml_op, PCMK__XA_OP_STATUS, op->op_status);
crm_xml_add_ms(xml_op, PCMK_META_INTERVAL, op->interval_ms);
if ((op->t_run > 0) || (op->t_rcchange > 0) || (op->exec_time > 0)
|| (op->queue_time > 0)) {
crm_trace("Timing data (" PCMK__OP_FMT "): "
"last=%lld change=%lld exec=%u queue=%u",
op->rsc_id, op->op_type, op->interval_ms,
(long long) op->t_run, (long long) op->t_rcchange,
op->exec_time, op->queue_time);
if ((op->interval_ms > 0) && (op->t_rcchange > 0)) {
// Recurring ops may have changed rc after initial run
crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
(long long) op->t_rcchange);
} else {
crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
(long long) op->t_run);
}
crm_xml_add_int(xml_op, PCMK_XA_EXEC_TIME, op->exec_time);
crm_xml_add_int(xml_op, PCMK_XA_QUEUE_TIME, op->queue_time);
}
if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* Record PCMK__META_MIGRATE_SOURCE and PCMK__META_MIGRATE_TARGET always
* for migrate ops.
*/
const char *name = PCMK__META_MIGRATE_SOURCE;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
name = PCMK__META_MIGRATE_TARGET;
crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
}
add_op_digest_to_xml(op, xml_op);
if (op_id_additional) {
free(op_id);
op_id = op_id_additional;
op_id_additional = NULL;
goto again;
}
if (local_user_data) {
free(local_user_data);
op->user_data = NULL;
}
free(magic);
free(op_id);
free(key);
return xml_op;
}
/*!
* \internal
* \brief Check whether an action shutdown-locks a resource to a node
*
* If the PCMK_OPT_SHUTDOWN_LOCK cluster property is set, resources will not be
* recovered on a different node if cleanly stopped, and may start only on that
* same node. This function checks whether that applies to a given action, so
* that the transition graph can be marked appropriately.
*
* \param[in] action Action to check
*
* \return true if \p action locks its resource to the action's node,
* otherwise false
*/
bool
pcmk__action_locks_rsc_to_node(const pcmk_action_t *action)
{
// Only resource actions taking place on the resource's lock node are locked
if ((action == NULL) || (action->rsc == NULL)
|| !pcmk__same_node(action->node, action->rsc->priv->lock_node)) {
return false;
}
/* During shutdown, only stops are locked (otherwise, another action such as
* a demote would cause the controller to clear the lock)
*/
if (action->node->details->shutdown && (action->task != NULL)
&& (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
return false;
}
return true;
}
/* lowest to highest */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
const pcmk__related_action_t *action_wrapper2 = a;
const pcmk__related_action_t *action_wrapper1 = b;
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (action_wrapper1->action->id < action_wrapper2->action->id) {
return 1;
}
if (action_wrapper1->action->id > action_wrapper2->action->id) {
return -1;
}
return 0;
}
/*!
* \internal
* \brief Remove any duplicate action inputs, merging action flags
*
* \param[in,out] action Action whose inputs should be checked
*/
void
pcmk__deduplicate_action_inputs(pcmk_action_t *action)
{
GList *item = NULL;
GList *next = NULL;
pcmk__related_action_t *last_input = NULL;
action->actions_before = g_list_sort(action->actions_before,
sort_action_id);
for (item = action->actions_before; item != NULL; item = next) {
pcmk__related_action_t *input = item->data;
next = item->next;
if ((last_input != NULL)
&& (input->action->id == last_input->action->id)) {
crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
input->action->uuid, input->action->id,
action->uuid, action->id);
/* For the purposes of scheduling, the ordering flags no longer
* matter, but crm_simulate looks at certain ones when creating a
* dot graph. Combining the flags is sufficient for that purpose.
*/
pcmk__set_relation_flags(last_input->flags, input->flags);
if (input->graphed) {
last_input->graphed = true;
}
free(item->data);
action->actions_before = g_list_delete_link(action->actions_before,
item);
} else {
last_input = input;
input->graphed = false;
}
}
}
/*!
* \internal
* \brief Output all scheduled actions
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__output_actions(pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = scheduler->priv->out;
// Output node (non-resource) actions
for (GList *iter = scheduler->priv->actions;
iter != NULL; iter = iter->next) {
char *node_name = NULL;
char *task = NULL;
pcmk_action_t *action = (pcmk_action_t *) iter->data;
if (action->rsc != NULL) {
continue; // Resource actions will be output later
} else if (pcmk_is_set(action->flags, pcmk__action_optional)) {
continue; // This action was not scheduled
}
if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
pcmk__str_none)) {
task = strdup("Shutdown");
} else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
pcmk__str_none)) {
const char *op = g_hash_table_lookup(action->meta,
PCMK__META_STONITH_ACTION);
task = crm_strdup_printf("Fence (%s)", op);
} else {
continue; // Don't display other node action types
}
if (pcmk__is_guest_or_bundle_node(action->node)) {
const pcmk_resource_t *remote = action->node->priv->remote;
node_name = crm_strdup_printf("%s (resource: %s)",
pcmk__node_name(action->node),
remote->priv->launcher->id);
} else if (action->node != NULL) {
node_name = crm_strdup_printf("%s", pcmk__node_name(action->node));
}
out->message(out, "node-action", task, node_name, action->reason);
free(node_name);
free(task);
}
// Output resource actions
for (GList *iter = scheduler->priv->resources;
iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->priv->cmds->output_actions(rsc);
}
}
/*!
* \internal
* \brief Get action name needed to compare digest for configuration changes
*
* \param[in] task Action name from history
* \param[in] interval_ms Action interval (in milliseconds)
*
* \return Action name whose digest should be compared
*/
static const char *
task_for_digest(const char *task, guint interval_ms)
{
/* Certain actions need to be compared against the parameters used to start
* the resource.
*/
if ((interval_ms == 0)
&& pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
PCMK_ACTION_PROMOTE, NULL)) {
task = PCMK_ACTION_START;
}
return task;
}
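/* For example (illustrative): a probe -- a monitor with a 0 interval -- is
 * compared against the parameters recorded for the resource's start, while a
 * recurring 10s monitor keeps its own digest.
 */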
/*!
* \internal
* \brief Check whether only sanitized parameters to an action changed
*
 * When collecting CIB files for troubleshooting, crm_report will mask
 * sensitive resource parameters. If simulations were run using such a
 * sanitized CIB, affected resources would appear to need a restart, which
 * would complicate troubleshooting. To avoid that, we save a "secure digest"
 * of the non-sensitive parameters. This function uses that digest to check
 * whether only masked parameters are different.
*
* \param[in] xml_op Resource history entry with secure digest
* \param[in] digest_data Operation digest information being compared
* \param[in] scheduler Scheduler data
*
* \return true if only sanitized parameters changed, otherwise false
*/
static bool
only_sanitized_changed(const xmlNode *xml_op,
const pcmk__op_digest_t *digest_data,
const pcmk_scheduler_t *scheduler)
{
const char *digest_secure = NULL;
if (!pcmk_is_set(scheduler->flags, pcmk__sched_sanitized)) {
// The scheduler is not being run as a simulation
return false;
}
digest_secure = crm_element_value(xml_op, PCMK__XA_OP_SECURE_DIGEST);
return (digest_data->rc != pcmk__digest_match) && (digest_secure != NULL)
&& (digest_data->digest_secure_calc != NULL)
&& (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
}
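/* Illustrative example: if only a masked parameter such as "password" changed,
 * the all-parameters digest differs but the secure digest still matches, so a
 * simulation does not schedule a restart for it.
 */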
/*!
* \internal
* \brief Force a restart due to a configuration change
*
* \param[in,out] rsc Resource that action is for
* \param[in] task Name of action whose configuration changed
* \param[in] interval_ms Action interval (in milliseconds)
* \param[in,out] node Node where resource should be restarted
*/
static void
force_restart(pcmk_resource_t *rsc, const char *task, guint interval_ms,
pcmk_node_t *node)
{
char *key = pcmk__op_key(rsc->id, task, interval_ms);
pcmk_action_t *required = custom_action(rsc, key, task, NULL, FALSE,
rsc->priv->scheduler);
pe_action_set_reason(required, "resource definition change", true);
trigger_unfencing(rsc, node, "Device parameters changed", NULL,
rsc->priv->scheduler);
}
/*!
* \internal
* \brief Schedule a reload of a resource on a node
*
* \param[in,out] data Resource to reload
* \param[in] user_data Where resource should be reloaded
*/
static void
schedule_reload(gpointer data, gpointer user_data)
{
pcmk_resource_t *rsc = data;
const pcmk_node_t *node = user_data;
pcmk_action_t *reload = NULL;
// For collective resources, just call recursively for children
if (rsc->priv->variant > pcmk__rsc_variant_primitive) {
g_list_foreach(rsc->priv->children, schedule_reload, user_data);
return;
}
// Skip the reload in certain situations
if ((node == NULL)
|| !pcmk_is_set(rsc->flags, pcmk__rsc_managed)
|| pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
pcmk__rsc_trace(rsc, "Skip reload of %s:%s%s %s",
rsc->id,
pcmk_is_set(rsc->flags, pcmk__rsc_managed)? "" : " unmanaged",
pcmk_is_set(rsc->flags, pcmk__rsc_failed)? " failed" : "",
(node == NULL)? "inactive" : node->priv->name);
return;
}
/* If a resource's configuration changed while a start was pending,
* force a full restart instead of a reload.
*/
if (pcmk_is_set(rsc->flags, pcmk__rsc_start_pending)) {
pcmk__rsc_trace(rsc,
"%s: preventing agent reload because start pending",
rsc->id);
custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->priv->scheduler);
return;
}
// Schedule the reload
pcmk__set_rsc_flags(rsc, pcmk__rsc_reload);
reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
FALSE, rsc->priv->scheduler);
pe_action_set_reason(reload, "resource definition change", FALSE);
// Set orderings so that a required stop or demote cancels the reload
pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->priv->scheduler);
pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->priv->scheduler);
}
/*!
* \internal
* \brief Handle any configuration change for an action
*
* Given an action from resource history, if the resource's configuration
* changed since the action was done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, etc.).
*
* \param[in,out] rsc Resource that action is for
* \param[in,out] node Node that action was on
* \param[in] xml_op Action XML from resource history
*
* \return true if action configuration changed, otherwise false
*/
bool
pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op)
{
guint interval_ms = 0;
const char *task = NULL;
const pcmk__op_digest_t *digest_data = NULL;
CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL),
return false);
task = crm_element_value(xml_op, PCMK_XA_OPERATION);
CRM_CHECK(task != NULL, return false);
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &interval_ms);
// If this is a recurring action, check whether it has been orphaned
if (interval_ms > 0) {
if (pcmk__find_action_config(rsc, task, interval_ms, false) != NULL) {
pcmk__rsc_trace(rsc,
"%s-interval %s for %s on %s is in configuration",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
} else if (pcmk_is_set(rsc->priv->scheduler->flags,
pcmk__sched_cancel_removed_actions)) {
pcmk__schedule_cancel(rsc,
crm_element_value(xml_op, PCMK__XA_CALL_ID),
task, interval_ms, node, "orphan");
return true;
} else {
pcmk__rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
return true;
}
}
crm_trace("Checking %s-interval %s for %s on %s for configuration changes",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node));
task = task_for_digest(task, interval_ms);
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->priv->scheduler);
if (only_sanitized_changed(xml_op, digest_data, rsc->priv->scheduler)) {
if (!pcmk__is_daemon && (rsc->priv->scheduler->priv->out != NULL)) {
pcmk__output_t *out = rsc->priv->scheduler->priv->out;
out->info(out,
"Only 'private' parameters to %s-interval %s for %s "
"on %s changed: %s",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node),
crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC));
}
return false;
}
switch (digest_data->rc) {
case pcmk__digest_restart:
crm_log_xml_debug(digest_data->params_restart, "params:restart");
force_restart(rsc, task, interval_ms, node);
return true;
case pcmk__digest_unknown:
case pcmk__digest_mismatch:
// Changes that can potentially be handled by an agent reload
if (interval_ms > 0) {
/* Recurring actions aren't reloaded per se; they are just
 * re-scheduled so that the next run uses the new parameters.
 * The old instance will be cancelled automatically.
 */
crm_log_xml_debug(digest_data->params_all, "params:reschedule");
pcmk__reschedule_recurring(rsc, task, interval_ms, node);
} else if (crm_element_value(xml_op,
PCMK__XA_OP_RESTART_DIGEST) != NULL) {
// Agent supports reload, so use it
trigger_unfencing(rsc, node,
"Device parameters changed (reload)", NULL,
rsc->priv->scheduler);
crm_log_xml_debug(digest_data->params_all, "params:reload");
schedule_reload((gpointer) rsc, (gpointer) node);
} else {
pcmk__rsc_trace(rsc,
"Restarting %s "
"because agent doesn't support reload",
rsc->id);
crm_log_xml_debug(digest_data->params_restart,
"params:restart");
force_restart(rsc, task, interval_ms, node);
}
return true;
default:
break;
}
return false;
}
/*!
* \internal
 * \brief Create a list of a resource's action history entries, sorted by call ID
 *
 * \param[in]  rsc_entry    Resource's \c PCMK__XE_LRM_RESOURCE status XML
 * \param[out] start_index  Where to store index of start-like action, if any
 * \param[out] stop_index   Where to store index of stop action, if any
 *
 * \return Newly created list of the resource's \c PCMK__XE_LRM_RSC_OP history
 *         entries, sorted by call ID (the caller is responsible for freeing it)
 */
static GList *
rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
{
GList *ops = NULL;
for (xmlNode *rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP,
NULL, NULL);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) {
ops = g_list_prepend(ops, rsc_op);
}
ops = g_list_sort(ops, sort_op_by_callid);
calculate_active_ops(ops, start_index, stop_index);
return ops;
}
/*!
* \internal
* \brief Process a resource's action history from the CIB status
*
* Given a resource's action history, if the resource's configuration
* changed since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
 * \param[in]     rsc_entry  Resource's \c PCMK__XE_LRM_RESOURCE status XML
* \param[in,out] rsc Resource whose history is being processed
* \param[in,out] node Node whose history is being processed
*/
static void
process_rsc_history(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
pcmk_node_t *node)
{
int offset = -1;
int stop_index = 0;
int start_index = 0;
GList *sorted_op_list = NULL;
if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
if (pcmk__is_anonymous_clone(pe__const_top_resource(rsc, false))) {
/* @TODO Should this be done for bundled primitives as well? Added
* by 2ac43ae31
*/
pcmk__rsc_trace(rsc,
"Skipping configuration check "
"for orphaned clone instance %s",
rsc->id);
} else {
pcmk__rsc_trace(rsc,
"Skipping configuration check and scheduling "
"clean-up for orphaned resource %s", rsc->id);
pcmk__schedule_cleanup(rsc, node, false);
}
return;
}
if (pe_find_node_id(rsc->priv->active_nodes,
node->priv->id) == NULL) {
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) {
pcmk__schedule_cleanup(rsc, node, false);
}
pcmk__rsc_trace(rsc,
"Skipping configuration check for %s "
"because no longer active on %s",
rsc->id, pcmk__node_name(node));
return;
}
pcmk__rsc_trace(rsc, "Checking for configuration changes for %s on %s",
rsc->id, pcmk__node_name(node));
if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) {
pcmk__schedule_cleanup(rsc, node, false);
}
sorted_op_list = rsc_history_as_list(rsc_entry, &start_index, &stop_index);
if (start_index < stop_index) {
return; // Resource is stopped
}
for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
xmlNode *rsc_op = (xmlNode *) iter->data;
const char *task = NULL;
guint interval_ms = 0;
if (++offset < start_index) {
// Skip actions that happened before a start
continue;
}
task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
if ((interval_ms > 0)
&& (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)
|| node->details->maintenance)) {
// Maintenance mode cancels recurring operations
pcmk__schedule_cancel(rsc,
crm_element_value(rsc_op, PCMK__XA_CALL_ID),
task, interval_ms, node, "maintenance mode");
} else if ((interval_ms > 0)
|| pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
PCMK_ACTION_START,
PCMK_ACTION_PROMOTE,
PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* If a resource operation failed, and the operation's definition
 * has changed, clear any fail count so it can be retried fresh.
 */
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't assigned resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
pcmk__add_param_check(rsc_op, rsc, node, pcmk__check_active);
} else if (pcmk__check_action_config(rsc, node, rsc_op)
&& (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL) != 0)) {
pe__clear_failcount(rsc, node, "action definition changed",
rsc->priv->scheduler);
}
}
}
g_list_free(sorted_op_list);
}
/*!
* \internal
* \brief Process a node's action history from the CIB status
*
* Given a node's resource history, if the resource's configuration changed
* since the actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] node Node whose history is being processed
* \param[in] lrm_rscs Node's \c PCMK__XE_LRM_RESOURCES from CIB status XML
*/
static void
process_node_history(pcmk_node_t *node, const xmlNode *lrm_rscs)
{
crm_trace("Processing node history for %s", pcmk__node_name(node));
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rscs,
PCMK__XE_LRM_RESOURCE,
NULL, NULL);
rsc_entry != NULL;
rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
if (rsc_entry->children != NULL) {
GList *result = pcmk__rscs_matching_id(pcmk__xe_id(rsc_entry),
node->priv->scheduler);
for (GList *iter = result; iter != NULL; iter = iter->next) {
pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (pcmk__is_primitive(rsc)) {
process_rsc_history(rsc_entry, rsc, node);
}
}
g_list_free(result);
}
}
}
// XPath to find a node's resource history
#define XPATH_NODE_HISTORY "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
"/" PCMK__XE_NODE_STATE \
"[@" PCMK_XA_UNAME "='%s']" \
"/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES
/*!
* \internal
* \brief Process any resource configuration changes in the CIB status
*
* Go through all nodes' resource history, and if a resource's configuration
* changed since its actions were done, schedule any actions needed (restart,
* reload, unfencing, rescheduling recurring actions, clean-up, etc.).
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
* \param[in,out] scheduler Scheduler data
*/
void
pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
{
crm_trace("Check resource and action configuration for changes");
/* Rather than iterate through the status section, iterate through the nodes
* and search for the appropriate status subsection for each. This skips
* orphaned nodes and lets us eliminate some cases before searching the XML.
*/
for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* Don't bother checking actions for a node that can't run actions ...
* unless it's in maintenance mode, in which case we still need to
* cancel any existing recurring monitors.
*/
if (node->details->maintenance
|| pcmk__node_available(node, false, false)) {
char *xpath = NULL;
xmlNode *history = NULL;
xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->priv->name);
history = pcmk__xpath_find_one(scheduler->input->doc, xpath,
LOG_NEVER);
free(xpath);
process_node_history(node, history);
}
}
}
diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c
index a4c603831c..b61b44b689 100644
--- a/lib/pengine/pe_digest.c
+++ b/lib/pengine/pe_digest.c
@@ -1,614 +1,614 @@
/*
* Copyright 2004-2025 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <glib.h>
#include <stdbool.h>
#include <crm/crm.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/pengine/internal.h>
#include "pe_status_private.h"
extern bool pcmk__is_daemon;
/*!
* \internal
* \brief Free an operation digest cache entry
*
* \param[in,out] ptr Pointer to cache entry to free
*
* \note The argument is a gpointer so this can be used as a hash table
* free function.
*/
void
pe__free_digests(gpointer ptr)
{
pcmk__op_digest_t *data = ptr;
if (data != NULL) {
pcmk__xml_free(data->params_all);
pcmk__xml_free(data->params_secure);
pcmk__xml_free(data->params_restart);
free(data->digest_all_calc);
free(data->digest_restart_calc);
free(data->digest_secure_calc);
free(data);
}
}
// Return true if XML attribute name is not a substring of a given string
static bool
attr_not_in_string(xmlAttrPtr a, void *user_data)
{
bool filter = false;
char *name = crm_strdup_printf(" %s ", (const char *) a->name);
if (strstr((const char *) user_data, name) == NULL) {
crm_trace("Filtering %s (not found in '%s')",
(const char *) a->name, (const char *) user_data);
filter = true;
}
free(name);
return filter;
}
// Return true if XML attribute name is a substring of a given string
static bool
attr_in_string(xmlAttrPtr a, void *user_data)
{
bool filter = false;
char *name = crm_strdup_printf(" %s ", (const char *) a->name);
if (strstr((const char *) user_data, name) != NULL) {
crm_trace("Filtering %s (found in '%s')",
(const char *) a->name, (const char *) user_data);
filter = true;
}
free(name);
return filter;
}
/*!
* \internal
* \brief Add digest of all parameters to a digest cache entry
*
* \param[out] data Digest cache entry to modify
* \param[in,out] rsc Resource that action was for
* \param[in] node Node action was performed on
* \param[in] params Resource parameters evaluated for node
* \param[in] task Name of action performed
* \param[in,out] interval_ms Action's interval (will be reset if in overrides)
* \param[in] xml_op Unused
* \param[in] op_version CRM feature set to use for digest calculation
* \param[in] overrides Key/value table to override resource parameters
* \param[in,out] scheduler Scheduler data
*/
static void
calculate_main_digest(pcmk__op_digest_t *data, pcmk_resource_t *rsc,
const pcmk_node_t *node, GHashTable *params,
const char *task, guint *interval_ms,
const xmlNode *xml_op, const char *op_version,
GHashTable *overrides, pcmk_scheduler_t *scheduler)
{
xmlNode *action_config = NULL;
data->params_all = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
/* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers
* that themselves are Pacemaker Remote nodes
*/
(void) pe__add_bundle_remote_name(rsc, data->params_all,
PCMK_REMOTE_RA_ADDR);
if (overrides != NULL) {
// If interval was overridden, reset it
const char *meta_name = CRM_META "_" PCMK_META_INTERVAL;
const char *interval_s = g_hash_table_lookup(overrides, meta_name);
if (interval_s != NULL) {
long long value_ll;
if ((pcmk__scan_ll(interval_s, &value_ll, 0LL) == pcmk_rc_ok)
&& (value_ll >= 0) && (value_ll <= G_MAXUINT)) {
*interval_ms = (guint) value_ll;
}
}
// Add overrides to list of all parameters
g_hash_table_foreach(overrides, hash2field, data->params_all);
}
// Add provided instance parameters
g_hash_table_foreach(params, hash2field, data->params_all);
// Find action configuration XML in CIB
action_config = pcmk__find_action_config(rsc, task, *interval_ms, true);
/* Add action-specific resource instance attributes to the digest list.
 *
 * If this is a one-time action with action-specific instance attributes,
 * enforce a restart instead of a reload-agent if the main digest doesn't
 * match, even if the restart digest does. This ensures any changes to the
 * action-specific parameters get applied for this specific action, and that
 * digests calculated for the resulting history will be correct. Default the
 * result to pcmk__digest_restart for the case where the main digest doesn't
 * match.
 */
params = pcmk__unpack_action_rsc_params(action_config, node->priv->attrs,
scheduler);
if ((*interval_ms == 0) && (g_hash_table_size(params) > 0)) {
data->rc = pcmk__digest_restart;
}
g_hash_table_foreach(params, hash2field, data->params_all);
g_hash_table_destroy(params);
// Add action meta-attributes
params = pcmk__unpack_action_meta(rsc, node, task, *interval_ms,
action_config);
g_hash_table_foreach(params, hash2metafield, data->params_all);
g_hash_table_destroy(params);
pcmk__filter_op_for_digest(data->params_all);
- data->digest_all_calc = pcmk__digest_operation(data->params_all);
+ data->digest_all_calc = pcmk__digest_op_params(data->params_all);
}
// Return true if XML attribute name is a Pacemaker-defined fencing parameter
static bool
is_fence_param(xmlAttrPtr attr, void *user_data)
{
return pcmk_stonith_param((const char *) attr->name);
}
/*!
* \internal
* \brief Add secure digest to a digest cache entry
*
* \param[out] data Digest cache entry to modify
* \param[in] rsc Resource that action was for
* \param[in] params Resource parameters evaluated for node
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] op_version CRM feature set to use for digest calculation
* \param[in] overrides Key/value hash table to override resource parameters
*/
static void
calculate_secure_digest(pcmk__op_digest_t *data, const pcmk_resource_t *rsc,
GHashTable *params, const xmlNode *xml_op,
const char *op_version, GHashTable *overrides)
{
const char *class = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS);
const char *secure_list = NULL;
bool old_version = (compare_version(op_version, "3.16.0") < 0);
if (xml_op == NULL) {
secure_list = " passwd password user ";
} else {
secure_list = crm_element_value(xml_op, PCMK__XA_OP_SECURE_PARAMS);
}
if (old_version) {
data->params_secure = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
if (overrides != NULL) {
g_hash_table_foreach(overrides, hash2field, data->params_secure);
}
g_hash_table_foreach(params, hash2field, data->params_secure);
} else {
// Start with a copy of all parameters
data->params_secure = pcmk__xml_copy(NULL, data->params_all);
}
if (secure_list != NULL) {
pcmk__xe_remove_matching_attrs(data->params_secure, false,
attr_in_string, (void *) secure_list);
}
if (old_version
&& pcmk_is_set(pcmk_get_ra_caps(class),
pcmk_ra_cap_fence_params)) {
/* For stonith resources, Pacemaker adds special parameters,
 * but these are not listed in fence agent meta-data, so with an older
 * DC version, the controller will not hash them. That means we have
 * to filter them out before calculating our hash for comparison.
 */
pcmk__xe_remove_matching_attrs(data->params_secure, false,
is_fence_param, NULL);
}
pcmk__filter_op_for_digest(data->params_secure);
/* CRM_meta_timeout *should* be part of a digest for recurring operations.
 * However, with an older DC version, the controller does not add the timeout
 * to secure digests, because it includes only parameters declared by the
 * resource agent.
 * Remove any timeout that made it this far, to match.
 */
if (old_version) {
pcmk__xe_remove_attr(data->params_secure,
CRM_META "_" PCMK_META_TIMEOUT);
}
- data->digest_secure_calc = pcmk__digest_operation(data->params_secure);
+ data->digest_secure_calc = pcmk__digest_op_params(data->params_secure);
}
/*!
* \internal
* \brief Add restart digest to a digest cache entry
*
* \param[out] data Digest cache entry to modify
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] op_version CRM feature set to use for digest calculation
*
* \note This function doesn't need to handle overrides because it starts with
* data->params_all, which already has overrides applied.
*/
static void
calculate_restart_digest(pcmk__op_digest_t *data, const xmlNode *xml_op,
const char *op_version)
{
const char *value = NULL;
// We must have XML of resource operation history
if (xml_op == NULL) {
return;
}
// And the history must have a restart digest to compare against
if (crm_element_value(xml_op, PCMK__XA_OP_RESTART_DIGEST) == NULL) {
return;
}
// Start with a copy of all parameters
data->params_restart = pcmk__xml_copy(NULL, data->params_all);
// Then filter out reloadable parameters, if any
value = crm_element_value(xml_op, PCMK__XA_OP_FORCE_RESTART);
if (value != NULL) {
pcmk__xe_remove_matching_attrs(data->params_restart, false,
attr_not_in_string, (void *) value);
}
- data->digest_restart_calc = pcmk__digest_operation(data->params_restart);
+ data->digest_restart_calc = pcmk__digest_op_params(data->params_restart);
}
/*!
* \internal
* \brief Create a new digest cache entry with calculated digests
*
* \param[in,out] rsc Resource that action was for
* \param[in] task Name of action performed
* \param[in,out] interval_ms Action's interval (will be reset if in overrides)
* \param[in] node Node action was performed on
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] overrides Key/value table to override resource parameters
* \param[in] calc_secure Whether to calculate secure digest
* \param[in,out] scheduler Scheduler data
*
* \return Pointer to new digest cache entry (or NULL on memory error)
* \note It is the caller's responsibility to free the result using
* pe__free_digests().
*/
pcmk__op_digest_t *
pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
guint *interval_ms, const pcmk_node_t *node,
const xmlNode *xml_op, GHashTable *overrides,
bool calc_secure, pcmk_scheduler_t *scheduler)
{
pcmk__op_digest_t *data = NULL;
const char *op_version = NULL;
GHashTable *params = NULL;
CRM_CHECK(scheduler != NULL, return NULL);
data = calloc(1, sizeof(pcmk__op_digest_t));
if (data == NULL) {
pcmk__sched_err(scheduler,
"Could not allocate memory for operation digest");
return NULL;
}
data->rc = pcmk__digest_match;
if (xml_op != NULL) {
op_version = crm_element_value(xml_op, PCMK_XA_CRM_FEATURE_SET);
}
if ((op_version == NULL) && (scheduler->input != NULL)) {
op_version = crm_element_value(scheduler->input,
PCMK_XA_CRM_FEATURE_SET);
}
if (op_version == NULL) {
op_version = CRM_FEATURE_SET;
}
params = pe_rsc_params(rsc, node, scheduler);
calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op,
op_version, overrides, scheduler);
if (calc_secure) {
calculate_secure_digest(data, rsc, params, xml_op, op_version,
overrides);
}
calculate_restart_digest(data, xml_op, op_version);
return data;
}
/*!
* \internal
* \brief Calculate action digests and store in node's digest cache
*
* \param[in,out] rsc Resource that action was for
* \param[in] task Name of action performed
* \param[in] interval_ms Action's interval
* \param[in,out] node Node action was performed on
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] calc_secure Whether to calculate secure digest
* \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry
*/
static pcmk__op_digest_t *
rsc_action_digest(pcmk_resource_t *rsc, const char *task, guint interval_ms,
pcmk_node_t *node, const xmlNode *xml_op,
bool calc_secure, pcmk_scheduler_t *scheduler)
{
pcmk__op_digest_t *data = NULL;
char *key = pcmk__op_key(rsc->id, task, interval_ms);
data = g_hash_table_lookup(node->priv->digest_cache, key);
if (data == NULL) {
data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
NULL, calc_secure, scheduler);
pcmk__assert(data != NULL);
g_hash_table_insert(node->priv->digest_cache, strdup(key), data);
}
free(key);
return data;
}
/*!
* \internal
* \brief Calculate operation digests and compare against an XML history entry
*
* \param[in,out] rsc Resource to check
* \param[in] xml_op Resource history XML
* \param[in,out] node Node to use for digest calculation
* \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry, with comparison result set
*/
pcmk__op_digest_t *
rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op,
pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pcmk__op_digest_t *data = NULL;
guint interval_ms = 0;
const char *op_version;
const char *task = crm_element_value(xml_op, PCMK_XA_OPERATION);
const char *digest_all;
const char *digest_restart;
pcmk__assert(node != NULL);
op_version = crm_element_value(xml_op, PCMK_XA_CRM_FEATURE_SET);
digest_all = crm_element_value(xml_op, PCMK__XA_OP_DIGEST);
digest_restart = crm_element_value(xml_op, PCMK__XA_OP_RESTART_DIGEST);
crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &interval_ms);
data = rsc_action_digest(rsc, task, interval_ms, node, xml_op,
pcmk_is_set(scheduler->flags,
pcmk__sched_sanitized),
scheduler);
if (!pcmk__str_eq(data->digest_restart_calc, digest_restart,
pcmk__str_null_matches)) {
pcmk__rsc_info(rsc,
"Parameter change for %s-interval %s of %s on %s "
"requires restart (hash now %s vs. %s "
"with op feature set %s for transition %s)",
pcmk__readable_interval(interval_ms), task, rsc->id,
pcmk__node_name(node), data->digest_restart_calc,
pcmk__s(digest_restart, "missing"), op_version,
crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC));
data->rc = pcmk__digest_restart;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
data->rc = pcmk__digest_unknown;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
/* For a non-recurring operation with action-specific ("extra") parameters
 * configured, enforce a restart rather than a reload-agent when the main
 * digest doesn't match, even if the restart digest matches. This ensures
 * any changes to the extra parameters get applied for this specific
 * operation, and that the digests calculated for the resulting
 * PCMK__XE_LRM_RSC_OP will be correct.
 * Preserve the implied rc of pcmk__digest_restart for the case where the
 * main digest doesn't match.
 */
if ((interval_ms == 0) && (data->rc == pcmk__digest_restart)) {
pcmk__rsc_info(rsc,
"Parameters containing extra ones to %ums-interval"
" %s action for %s on %s "
"changed: hash was %s vs. now %s (restart:%s) %s",
interval_ms, task, rsc->id, pcmk__node_name(node),
pcmk__s(digest_all, "missing"),
data->digest_all_calc, op_version,
crm_element_value(xml_op,
PCMK__XA_TRANSITION_MAGIC));
} else {
pcmk__rsc_info(rsc,
"Parameters to %ums-interval %s action for %s on %s "
"changed: hash was %s vs. now %s (%s:%s) %s",
interval_ms, task, rsc->id, pcmk__node_name(node),
pcmk__s(digest_all, "missing"),
data->digest_all_calc,
(interval_ms > 0)? "reschedule" : "reload",
op_version,
crm_element_value(xml_op,
PCMK__XA_TRANSITION_MAGIC));
data->rc = pcmk__digest_mismatch;
}
} else {
data->rc = pcmk__digest_match;
}
return data;
}
/*!
* \internal
* \brief Create an unfencing summary for use in special node attribute
*
* Create a string combining a fence device's resource ID, agent type, and
* parameter digest (whether for all parameters or just non-private parameters).
* This can be stored in a special node attribute, allowing us to detect changes
* in either the agent type or parameters, to know whether unfencing must be
* redone or can be safely skipped when the device's history is cleaned.
*
* \param[in] rsc_id Fence device resource ID
* \param[in] agent_type Fence device agent
* \param[in] param_digest Fence device parameter digest
*
* \return Newly allocated string with unfencing digest
* \note The caller is responsible for freeing the result.
*/
static inline char *
create_unfencing_summary(const char *rsc_id, const char *agent_type,
const char *param_digest)
{
return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
}
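/* Illustrative example (hypothetical values): for a device "fence1" using
 * agent "fence_ipmilan" with parameter digest
 * "0123456789abcdef0123456789abcdef", the summary would be
 * "fence1:fence_ipmilan:0123456789abcdef0123456789abcdef".
 */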
/*!
* \internal
* \brief Check whether a node can skip unfencing
*
* Check whether a fence device's current definition matches a node's
* stored summary of when it was last unfenced by the device.
*
* \param[in] rsc_id Fence device's resource ID
* \param[in] agent Fence device's agent type
* \param[in] digest_calc Fence device's current parameter digest
* \param[in] node_summary Value of node's special unfencing node attribute
* (a comma-separated list of unfencing summaries for
* all devices that have unfenced this node)
*
* \return TRUE if digest matches, FALSE otherwise
*/
static bool
unfencing_digest_matches(const char *rsc_id, const char *agent,
const char *digest_calc, const char *node_summary)
{
bool matches = FALSE;
if (rsc_id && agent && digest_calc && node_summary) {
char *search_secure = create_unfencing_summary(rsc_id, agent,
digest_calc);
/* The digest was calculated including the device ID and agent,
* so there is no risk of collision using strstr().
*/
matches = (strstr(node_summary, search_secure) != NULL);
crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
search_secure, matches? "" : "not ", node_summary);
free(search_secure);
}
return matches;
}
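/* Illustrative example (hypothetical values): if the node attribute holds
 * "fence1:fence_ipmilan:aaa111,fence2:fence_apc:bbb222", then a calculated
 * summary of "fence1:fence_ipmilan:aaa111" matches, while any change to the
 * agent or digest portion does not.
 */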
/* Magic string to use as the action name for digest cache entries used for
 * unfencing checks. This is not a real action name (unlike "on"), so
 * pcmk__check_action_config() won't confuse these entries with real actions.
 */
#define STONITH_DIGEST_TASK "stonith-on"
/*!
* \internal
* \brief Calculate fence device digests and digest comparison result
*
* \param[in,out] rsc Fence device resource
* \param[in] agent Fence device's agent type
* \param[in,out] node Node with digest cache to use
* \param[in,out] scheduler Scheduler data
*
* \return Node's digest cache entry
*/
pcmk__op_digest_t *
pe__compare_fencing_digest(pcmk_resource_t *rsc, const char *agent,
pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
const char *node_summary = NULL;
// Calculate device's current parameter digests
pcmk__op_digest_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U,
node, NULL, TRUE, scheduler);
// Check whether node has special unfencing summary node attribute
node_summary = pcmk__node_attr(node, CRM_ATTR_DIGESTS_ALL, NULL,
pcmk__rsc_node_current);
if (node_summary == NULL) {
data->rc = pcmk__digest_unknown;
return data;
}
// Check whether full parameter digest matches
if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
node_summary)) {
data->rc = pcmk__digest_match;
return data;
}
// Check whether secure parameter digest matches
node_summary = pcmk__node_attr(node, CRM_ATTR_DIGESTS_SECURE, NULL,
pcmk__rsc_node_current);
if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
node_summary)) {
data->rc = pcmk__digest_match;
if (!pcmk__is_daemon && (scheduler->priv->out != NULL)) {
pcmk__output_t *out = scheduler->priv->out;
out->info(out, "Only 'private' parameters to %s "
"for unfencing %s changed", rsc->id,
pcmk__node_name(node));
}
return data;
}
// Parameters don't match
data->rc = pcmk__digest_mismatch;
if (pcmk_is_set(scheduler->flags, pcmk__sched_sanitized)
&& (data->digest_secure_calc != NULL)) {
if (scheduler->priv->out != NULL) {
pcmk__output_t *out = scheduler->priv->out;
char *digest = create_unfencing_summary(rsc->id, agent,
data->digest_secure_calc);
out->info(out, "Parameters to %s for unfencing "
"%s changed, try '%s'", rsc->id,
pcmk__node_name(node), digest);
free(digest);
} else if (!pcmk__is_daemon) {
char *digest = create_unfencing_summary(rsc->id, agent,
data->digest_secure_calc);
printf("Parameters to %s for unfencing %s changed, try '%s'\n",
rsc->id, pcmk__node_name(node), digest);
free(digest);
}
}
return data;
}
