diff --git a/cts/scheduler/xml/utilization.xml b/cts/scheduler/xml/utilization.xml
index c91e67c190..3270b97785 100644
--- a/cts/scheduler/xml/utilization.xml
+++ b/cts/scheduler/xml/utilization.xml
@@ -1,47 +1,52 @@
-
-
+
+
+
+
+
+
+
diff --git a/daemons/attrd/attrd_attributes.c b/daemons/attrd/attrd_attributes.c
index f1da16a934..516ced794f 100644
--- a/daemons/attrd/attrd_attributes.c
+++ b/daemons/attrd/attrd_attributes.c
@@ -1,187 +1,188 @@
/*
* Copyright 2013-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-attrd.h"
static attribute_t *
attrd_create_attribute(xmlNode *xml)
{
int dampen = 0;
const char *value = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING);
attribute_t *a = calloc(1, sizeof(attribute_t));
CRM_ASSERT(a != NULL);
a->id = crm_element_value_copy(xml, PCMK__XA_ATTR_NAME);
- a->set = crm_element_value_copy(xml, PCMK__XA_ATTR_SET);
+ a->set_id = crm_element_value_copy(xml, PCMK__XA_ATTR_SET);
+ a->set_type = crm_element_value_copy(xml, PCMK__XA_ATTR_SET_TYPE);
a->uuid = crm_element_value_copy(xml, PCMK__XA_ATTR_UUID);
a->values = pcmk__strikey_table(NULL, attrd_free_attribute_value);
crm_element_value_int(xml, PCMK__XA_ATTR_IS_PRIVATE, &a->is_private);
a->user = crm_element_value_copy(xml, PCMK__XA_ATTR_USER);
crm_trace("Performing all %s operations as user '%s'", a->id, a->user);
if (value != NULL) {
dampen = crm_get_msec(value);
}
crm_trace("Created attribute %s with %s write delay", a->id,
(a->timeout_ms == 0)? "no" : pcmk__readable_interval(a->timeout_ms));
if(dampen > 0) {
a->timeout_ms = dampen;
a->timer = attrd_add_timer(a->id, a->timeout_ms, a);
} else if (dampen < 0) {
crm_warn("Ignoring invalid delay %s for attribute %s", value, a->id);
}
g_hash_table_replace(attributes, a->id, a);
return a;
}
static int
attrd_update_dampening(attribute_t *a, xmlNode *xml, const char *attr)
{
const char *dvalue = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING);
int dampen = 0;
if (dvalue == NULL) {
crm_warn("Could not update %s: peer did not specify value for delay",
attr);
return EINVAL;
}
dampen = crm_get_msec(dvalue);
if (dampen < 0) {
crm_warn("Could not update %s: invalid delay value %dms (%s)",
attr, dampen, dvalue);
return EINVAL;
}
if (a->timeout_ms != dampen) {
mainloop_timer_del(a->timer);
a->timeout_ms = dampen;
if (dampen > 0) {
a->timer = attrd_add_timer(attr, a->timeout_ms, a);
crm_info("Update attribute %s delay to %dms (%s)",
attr, dampen, dvalue);
} else {
a->timer = NULL;
crm_info("Update attribute %s to remove delay", attr);
}
/* If dampening changed, do an immediate write-out,
* otherwise repeated dampening changes would prevent write-outs
*/
attrd_write_or_elect_attribute(a);
}
return pcmk_rc_ok;
}
GHashTable *attributes = NULL;
/*!
* \internal
* \brief Create an XML representation of an attribute for use in peer messages
*
* \param[in,out] parent Create attribute XML as child element of this
* \param[in] a Attribute to represent
* \param[in] v Attribute value to represent
* \param[in] force_write If true, value should be written even if unchanged
*
* \return XML representation of attribute
*/
xmlNode *
attrd_add_value_xml(xmlNode *parent, const attribute_t *a,
const attribute_value_t *v, bool force_write)
{
xmlNode *xml = create_xml_node(parent, __func__);
crm_xml_add(xml, PCMK__XA_ATTR_NAME, a->id);
- crm_xml_add(xml, PCMK__XA_ATTR_SET, a->set);
+ crm_xml_add(xml, PCMK__XA_ATTR_SET, a->set_id);
crm_xml_add(xml, PCMK__XA_ATTR_UUID, a->uuid);
crm_xml_add(xml, PCMK__XA_ATTR_USER, a->user);
pcmk__xe_add_node(xml, v->nodename, v->nodeid);
if (v->is_remote != 0) {
crm_xml_add_int(xml, PCMK__XA_ATTR_IS_REMOTE, 1);
}
crm_xml_add(xml, PCMK__XA_ATTR_VALUE, v->current);
crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING, a->timeout_ms / 1000);
crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE, a->is_private);
crm_xml_add_int(xml, PCMK__XA_ATTR_FORCE, force_write);
return xml;
}
void
attrd_clear_value_seen(void)
{
GHashTableIter aIter;
GHashTableIter vIter;
attribute_t *a;
attribute_value_t *v = NULL;
g_hash_table_iter_init(&aIter, attributes);
while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
g_hash_table_iter_init(&vIter, a->values);
while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) {
v->seen = FALSE;
crm_trace("Clear seen flag %s[%s] = %s.", a->id, v->nodename, v->current);
}
}
}
attribute_t *
attrd_populate_attribute(xmlNode *xml, const char *attr)
{
attribute_t *a = NULL;
bool update_both = false;
const char *op = crm_element_value(xml, PCMK__XA_TASK);
// NULL because PCMK__ATTRD_CMD_SYNC_RESPONSE has no PCMK__XA_TASK
update_both = pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE_BOTH,
pcmk__str_null_matches);
// Look up or create attribute entry
a = g_hash_table_lookup(attributes, attr);
if (a == NULL) {
if (update_both || pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE, pcmk__str_none)) {
a = attrd_create_attribute(xml);
} else {
crm_warn("Could not update %s: attribute not found", attr);
return NULL;
}
}
// Update attribute dampening
if (update_both || pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE_DELAY, pcmk__str_none)) {
int rc = attrd_update_dampening(a, xml, attr);
if (rc != pcmk_rc_ok || !update_both) {
return NULL;
}
}
return a;
}
diff --git a/daemons/attrd/attrd_cib.c b/daemons/attrd/attrd_cib.c
index e80bb0dd50..bd505aff96 100644
--- a/daemons/attrd/attrd_cib.c
+++ b/daemons/attrd/attrd_cib.c
@@ -1,373 +1,380 @@
/*
* Copyright 2013-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-attrd.h"
static int last_cib_op_done = 0;
static gboolean
attribute_timer_cb(gpointer data)
{
attribute_t *a = data;
crm_trace("Dampen interval expired for %s", a->id);
attrd_write_or_elect_attribute(a);
return FALSE;
}
static void
attrd_cib_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data)
{
int level = LOG_ERR;
GHashTableIter iter;
const char *peer = NULL;
attribute_value_t *v = NULL;
char *name = user_data;
attribute_t *a = g_hash_table_lookup(attributes, name);
if(a == NULL) {
crm_info("Attribute %s no longer exists", name);
return;
}
a->update = 0;
if (rc == pcmk_ok && call_id < 0) {
rc = call_id;
}
switch (rc) {
case pcmk_ok:
level = LOG_INFO;
last_cib_op_done = call_id;
if (a->timer && !a->timeout_ms) {
// Remove temporary dampening for failed writes
mainloop_timer_del(a->timer);
a->timer = NULL;
}
break;
case -pcmk_err_diff_failed: /* When an attr changes while the CIB is syncing */
case -ETIME: /* When an attr changes while there is a DC election */
case -ENXIO: /* When an attr changes while the CIB is syncing a
* newer config from a node that just came up
*/
level = LOG_WARNING;
break;
}
do_crm_log(level, "CIB update %d result for %s: %s " CRM_XS " rc=%d",
call_id, a->id, pcmk_strerror(rc), rc);
g_hash_table_iter_init(&iter, a->values);
while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) {
do_crm_log(level, "* %s[%s]=%s", a->id, peer, v->requested);
free(v->requested);
v->requested = NULL;
if (rc != pcmk_ok) {
a->changed = true; /* Attempt write out again */
}
}
if (a->changed && attrd_election_won()) {
if (rc == pcmk_ok) {
/* We deferred a write of a new update because this update was in
* progress. Write out the new value without additional delay.
*/
attrd_write_attribute(a, false);
/* We're re-attempting a write because the original failed; delay
* the next attempt so we don't potentially flood the CIB manager
* and logs with a zillion attempts per second.
*
* @TODO We could elect a new writer instead. However, we'd have to
* somehow downgrade our vote, and we'd still need something like this
* if all peers similarly fail to write this attribute (which may
* indicate a corrupted attribute entry rather than a CIB issue).
*/
} else if (a->timer) {
// Attribute has a dampening value, so use that as delay
if (!mainloop_timer_running(a->timer)) {
crm_trace("Delayed re-attempted write for %s by %s",
name, pcmk__readable_interval(a->timeout_ms));
mainloop_timer_start(a->timer);
}
} else {
/* Set a temporary dampening of 2 seconds (timer will continue
* to exist until the attribute's dampening gets set or the
* write succeeds).
*/
a->timer = attrd_add_timer(a->id, 2000, a);
mainloop_timer_start(a->timer);
}
}
}
static void
build_update_element(xmlNode *parent, attribute_t *a, const char *nodeid, const char *value)
{
const char *set = NULL;
xmlNode *xml_obj = NULL;
xml_obj = create_xml_node(parent, XML_CIB_TAG_STATE);
crm_xml_add(xml_obj, XML_ATTR_ID, nodeid);
xml_obj = create_xml_node(xml_obj, XML_TAG_TRANSIENT_NODEATTRS);
crm_xml_add(xml_obj, XML_ATTR_ID, nodeid);
- xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS);
- if (a->set) {
- crm_xml_set_id(xml_obj, "%s", a->set);
+ if (pcmk__str_eq(a->set_type, XML_TAG_ATTR_SETS, pcmk__str_null_matches)) {
+ xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS);
+ } else if (pcmk__str_eq(a->set_type, XML_TAG_UTILIZATION, pcmk__str_none)) {
+ xml_obj = create_xml_node(xml_obj, XML_TAG_UTILIZATION);
+ } else {
+ crm_err("Unknown set type attribute: %s", a->set_type);
+ }
+
+ if (a->set_id) {
+ crm_xml_set_id(xml_obj, "%s", a->set_id);
} else {
crm_xml_set_id(xml_obj, "%s-%s", XML_CIB_TAG_STATUS, nodeid);
}
set = ID(xml_obj);
xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR);
if (a->uuid) {
crm_xml_set_id(xml_obj, "%s", a->uuid);
} else {
crm_xml_set_id(xml_obj, "%s-%s", set, a->id);
}
crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, a->id);
if(value) {
crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, value);
} else {
crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, "");
crm_xml_add(xml_obj, "__delete__", XML_NVPAIR_ATTR_VALUE);
}
}
static void
send_alert_attributes_value(attribute_t *a, GHashTable *t)
{
int rc = 0;
attribute_value_t *at = NULL;
GHashTableIter vIter;
g_hash_table_iter_init(&vIter, t);
while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & at)) {
rc = attrd_send_attribute_alert(at->nodename, at->nodeid,
a->id, at->current);
crm_trace("Sent alerts for %s[%s]=%s: nodeid=%d rc=%d",
a->id, at->nodename, at->current, at->nodeid, rc);
}
}
static void
set_alert_attribute_value(GHashTable *t, attribute_value_t *v)
{
attribute_value_t *a_v = NULL;
a_v = calloc(1, sizeof(attribute_value_t));
CRM_ASSERT(a_v != NULL);
a_v->nodeid = v->nodeid;
a_v->nodename = strdup(v->nodename);
pcmk__str_update(&a_v->current, v->current);
g_hash_table_replace(t, a_v->nodename, a_v);
}
mainloop_timer_t *
attrd_add_timer(const char *id, int timeout_ms, attribute_t *attr)
{
return mainloop_timer_add(id, timeout_ms, FALSE, attribute_timer_cb, attr);
}
void
attrd_write_attribute(attribute_t *a, bool ignore_delay)
{
int private_updates = 0, cib_updates = 0;
xmlNode *xml_top = NULL;
attribute_value_t *v = NULL;
GHashTableIter iter;
enum cib_call_options flags = cib_quorum_override;
GHashTable *alert_attribute_value = NULL;
if (a == NULL) {
return;
}
/* If this attribute will be written to the CIB ... */
if (!a->is_private) {
/* Defer the write if now's not a good time */
CRM_CHECK(the_cib != NULL, return);
if (a->update && (a->update < last_cib_op_done)) {
crm_info("Write out of '%s' continuing: update %d considered lost", a->id, a->update);
a->update = 0; // Don't log this message again
} else if (a->update) {
crm_info("Write out of '%s' delayed: update %d in progress", a->id, a->update);
return;
} else if (mainloop_timer_running(a->timer)) {
if (ignore_delay) {
/* 'refresh' forces a write of the current value of all attributes.
* Cancel any existing timers; we're writing it NOW.
*/
mainloop_timer_stop(a->timer);
crm_debug("Write out of '%s': timer is running but ignore delay", a->id);
} else {
crm_info("Write out of '%s' delayed: timer is running", a->id);
return;
}
}
/* Initialize the status update XML */
xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS);
}
/* Attribute will be written shortly, so clear changed flag */
a->changed = false;
/* We will check all peers' uuids shortly, so initialize this to false */
a->unknown_peer_uuids = false;
/* Attribute will be written shortly, so clear forced write flag */
a->force_write = FALSE;
/* Make the table for the attribute trap */
alert_attribute_value = pcmk__strikey_table(NULL, attrd_free_attribute_value);
/* Iterate over each peer value of this attribute */
g_hash_table_iter_init(&iter, a->values);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & v)) {
crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, CRM_GET_PEER_ANY);
/* If the value's peer info does not correspond to a peer, ignore it */
if (peer == NULL) {
crm_notice("Cannot update %s[%s]=%s because peer not known",
a->id, v->nodename, v->current);
continue;
}
/* If we're just learning the peer's node id, remember it */
if (peer->id && (v->nodeid == 0)) {
crm_trace("Learned ID %u for node %s", peer->id, v->nodename);
v->nodeid = peer->id;
}
/* If this is a private attribute, no update needs to be sent */
if (a->is_private) {
private_updates++;
continue;
}
/* If the peer is found, but its uuid is unknown, defer write */
if (peer->uuid == NULL) {
a->unknown_peer_uuids = true;
crm_notice("Cannot update %s[%s]=%s because peer UUID not known "
"(will retry if learned)",
a->id, v->nodename, v->current);
continue;
}
/* Add this value to status update XML */
crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID %u/%u)",
a->id, v->nodename, v->current,
peer->uname, peer->uuid, peer->id, v->nodeid);
build_update_element(xml_top, a, peer->uuid, v->current);
cib_updates++;
/* Preservation of the attribute to transmit alert */
set_alert_attribute_value(alert_attribute_value, v);
free(v->requested);
v->requested = NULL;
if (v->current) {
v->requested = strdup(v->current);
} else {
/* Older attrd versions don't know about the cib_mixed_update
* flag so make sure it goes to the local cib which does
*/
cib__set_call_options(flags, crm_system_name,
cib_mixed_update|cib_scope_local);
}
}
if (private_updates) {
crm_info("Processed %d private change%s for %s, id=%s, set=%s",
private_updates, pcmk__plural_s(private_updates),
- a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set, "n/a"));
+ a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a"));
}
if (cib_updates) {
crm_log_xml_trace(xml_top, __func__);
a->update = cib_internal_op(the_cib, PCMK__CIB_REQUEST_MODIFY, NULL,
XML_CIB_TAG_STATUS, xml_top, NULL, flags,
a->user);
crm_info("Sent CIB request %d with %d change%s for %s (id %s, set %s)",
a->update, cib_updates, pcmk__plural_s(cib_updates),
- a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set, "n/a"));
+ a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a"));
the_cib->cmds->register_callback_full(the_cib, a->update,
CIB_OP_TIMEOUT_S, FALSE,
strdup(a->id),
"attrd_cib_callback",
attrd_cib_callback, free);
/* Transmit alert of the attribute */
send_alert_attributes_value(a, alert_attribute_value);
}
g_hash_table_destroy(alert_attribute_value);
free_xml(xml_top);
}
void
attrd_write_attributes(bool all, bool ignore_delay)
{
GHashTableIter iter;
attribute_t *a = NULL;
crm_debug("Writing out %s attributes", all? "all" : "changed");
g_hash_table_iter_init(&iter, attributes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) {
if (!all && a->unknown_peer_uuids) {
// Try writing this attribute again, in case peer ID was learned
a->changed = true;
} else if (a->force_write) {
/* If the force_write flag is set, write the attribute. */
a->changed = true;
}
if(all || a->changed) {
/* When forced write flag is set, ignore delay. */
attrd_write_attribute(a, (a->force_write ? true : ignore_delay));
} else {
crm_trace("Skipping unchanged attribute %s", a->id);
}
}
}
void
attrd_write_or_elect_attribute(attribute_t *a)
{
if (attrd_election_won()) {
attrd_write_attribute(a, false);
} else {
attrd_start_election_if_needed();
}
}
diff --git a/daemons/attrd/attrd_utils.c b/daemons/attrd/attrd_utils.c
index f3a2059d9d..c2962c1499 100644
--- a/daemons/attrd/attrd_utils.c
+++ b/daemons/attrd/attrd_utils.c
@@ -1,340 +1,341 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "pacemaker-attrd.h"
cib_t *the_cib = NULL;
static bool requesting_shutdown = false;
static bool shutting_down = false;
static GMainLoop *mloop = NULL;
/* A hash table storing information on the protocol version of each peer attrd.
* The key is the peer's uname, and the value is the protocol version number.
*/
GHashTable *peer_protocol_vers = NULL;
/*!
* \internal
* \brief Set requesting_shutdown state
*/
void
attrd_set_requesting_shutdown(void)
{
requesting_shutdown = true;
}
/*!
* \internal
* \brief Clear requesting_shutdown state
*/
void
attrd_clear_requesting_shutdown(void)
{
requesting_shutdown = false;
}
/*!
* \internal
* \brief Check whether we're currently requesting shutdown
*
* \return true if requesting shutdown, false otherwise
*/
bool
attrd_requesting_shutdown(void)
{
return requesting_shutdown;
}
/*!
* \internal
* \brief Check whether we're currently shutting down
*
* \return true if shutting down, false otherwise
*/
bool
attrd_shutting_down(void)
{
return shutting_down;
}
/*!
* \internal
* \brief Exit (using mainloop or not, as appropriate)
*
* \param[in] nsig Ignored
*/
void
attrd_shutdown(int nsig)
{
// Tell various functions not to do anything
shutting_down = true;
// Don't respond to signals while shutting down
mainloop_destroy_signal(SIGTERM);
mainloop_destroy_signal(SIGCHLD);
mainloop_destroy_signal(SIGPIPE);
mainloop_destroy_signal(SIGUSR1);
mainloop_destroy_signal(SIGUSR2);
mainloop_destroy_signal(SIGTRAP);
attrd_free_waitlist();
attrd_free_confirmations();
if (peer_protocol_vers != NULL) {
g_hash_table_destroy(peer_protocol_vers);
peer_protocol_vers = NULL;
}
if ((mloop == NULL) || !g_main_loop_is_running(mloop)) {
/* If there's no main loop active, just exit. This should be possible
* only if we get SIGTERM in brief windows at start-up and shutdown.
*/
crm_exit(CRM_EX_OK);
} else {
g_main_loop_quit(mloop);
g_main_loop_unref(mloop);
}
}
/*!
* \internal
* \brief Create a main loop for attrd
*/
void
attrd_init_mainloop(void)
{
mloop = g_main_loop_new(NULL, FALSE);
}
/*!
* \internal
* \brief Run attrd main loop
*/
void
attrd_run_mainloop(void)
{
g_main_loop_run(mloop);
}
void
attrd_cib_disconnect(void)
{
CRM_CHECK(the_cib != NULL, return);
the_cib->cmds->del_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb);
the_cib->cmds->del_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb);
cib__clean_up_connection(&the_cib);
}
void
attrd_cib_replaced_cb(const char *event, xmlNode * msg)
{
int change_section = cib_change_section_nodes | cib_change_section_status | cib_change_section_alerts;
if (attrd_requesting_shutdown() || attrd_shutting_down()) {
return;
}
crm_element_value_int(msg, F_CIB_CHANGE_SECTION, &change_section);
if (attrd_election_won()) {
if (change_section & (cib_change_section_nodes | cib_change_section_status)) {
crm_notice("Updating all attributes after %s event", event);
attrd_write_attributes(true, false);
}
}
if (change_section & cib_change_section_alerts) {
// Check for changes in alerts
mainloop_set_trigger(attrd_config_read);
}
}
/* strlen("value") */
#define plus_plus_len (5)
/*!
* \internal
* \brief Check whether an attribute value should be expanded
*
* \param[in] value Attribute value to check
*
* \return true if value needs expansion, false otherwise
*/
bool
attrd_value_needs_expansion(const char *value)
{
return ((strlen(value) >= (plus_plus_len + 2))
&& (value[plus_plus_len] == '+')
&& ((value[plus_plus_len + 1] == '+')
|| (value[plus_plus_len + 1] == '=')));
}
/*!
* \internal
* \brief Expand an increment expression into an integer
*
* \param[in] value Attribute increment expression to expand
* \param[in] old_value Previous value of attribute
*
* \return Expanded value
*/
int
attrd_expand_value(const char *value, const char *old_value)
{
int offset = 1;
int int_value = char2score(old_value);
if (value[plus_plus_len + 1] != '+') {
const char *offset_s = value + (plus_plus_len + 2);
offset = char2score(offset_s);
}
int_value += offset;
if (int_value > INFINITY) {
int_value = INFINITY;
}
return int_value;
}
/*!
* \internal
* \brief Create regular expression matching failure-related attributes
*
* \param[out] regex Where to store created regular expression
* \param[in] rsc Name of resource to clear (or NULL for all)
* \param[in] op Operation to clear if rsc is specified (or NULL for all)
* \param[in] interval_ms Interval of operation to clear if op is specified
*
* \return pcmk_ok on success, -EINVAL if arguments are invalid
*
* \note The caller is responsible for freeing the result with regfree().
*/
int
attrd_failure_regex(regex_t *regex, const char *rsc, const char *op,
guint interval_ms)
{
char *pattern = NULL;
int rc;
/* Create a pattern that matches desired attributes */
if (rsc == NULL) {
pattern = strdup(ATTRD_RE_CLEAR_ALL);
} else if (op == NULL) {
pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc);
} else {
pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP, rsc, op, interval_ms);
}
/* Compile pattern into regular expression */
crm_trace("Clearing attributes matching %s", pattern);
rc = regcomp(regex, pattern, REG_EXTENDED|REG_NOSUB);
free(pattern);
return (rc == 0)? pcmk_ok : -EINVAL;
}
void
attrd_free_attribute_value(gpointer data)
{
attribute_value_t *v = data;
free(v->nodename);
free(v->current);
free(v->requested);
free(v);
}
void
attrd_free_attribute(gpointer data)
{
attribute_t *a = data;
if(a) {
free(a->id);
- free(a->set);
+ free(a->set_id);
+ free(a->set_type);
free(a->uuid);
free(a->user);
mainloop_timer_del(a->timer);
g_hash_table_destroy(a->values);
free(a);
}
}
/*!
* \internal
* \brief When a peer node leaves the cluster, stop tracking its protocol version.
*
* \param[in] host The peer node's uname to be removed
*/
void
attrd_remove_peer_protocol_ver(const char *host)
{
if (peer_protocol_vers != NULL) {
g_hash_table_remove(peer_protocol_vers, host);
}
}
/*!
* \internal
* \brief When a peer node broadcasts a message with its protocol version, keep
* track of that information.
*
* We keep track of each peer's protocol version so we know which peers to
* expect confirmation messages from when handling cluster-wide sync points.
* We additionally keep track of the lowest protocol version supported by all
* peers so we know when we can send IPC messages containing more than one
* request.
*
* \param[in] host The peer node's uname to be tracked
* \param[in] value The peer node's protocol version
*/
void
attrd_update_minimum_protocol_ver(const char *host, const char *value)
{
int ver;
if (peer_protocol_vers == NULL) {
peer_protocol_vers = pcmk__strkey_table(free, NULL);
}
pcmk__scan_min_int(value, &ver, 0);
if (ver > 0) {
char *host_name = strdup(host);
/* Record the peer attrd's protocol version. */
CRM_ASSERT(host_name != NULL);
g_hash_table_insert(peer_protocol_vers, host_name, GINT_TO_POINTER(ver));
/* If the protocol version is a new minimum, record it as such. */
if (minimum_protocol_version == -1 || ver < minimum_protocol_version) {
minimum_protocol_version = ver;
crm_trace("Set minimum attrd protocol version to %d",
minimum_protocol_version);
}
}
}
diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h
index 83d7c6b0c5..df88c4c16f 100644
--- a/daemons/attrd/pacemaker-attrd.h
+++ b/daemons/attrd/pacemaker-attrd.h
@@ -1,211 +1,212 @@
/*
* Copyright 2013-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#ifndef PACEMAKER_ATTRD__H
# define PACEMAKER_ATTRD__H
#include
#include
#include
#include
#include
#include
#include
/*
* Legacy attrd (all pre-1.1.11 Pacemaker versions, plus all versions when used
* with the no-longer-supported CMAN or corosync-plugin stacks) is unversioned.
*
* With atomic attrd, each attrd will send ATTRD_PROTOCOL_VERSION with every
* peer request and reply. As of Pacemaker 2.0.0, at start-up each attrd will
* also set a private attribute for itself with its version, so any attrd can
* determine the minimum version supported by all peers.
*
* Protocol Pacemaker Significant changes
* -------- --------- -------------------
* 1 1.1.11 PCMK__ATTRD_CMD_UPDATE (PCMK__XA_ATTR_NAME only),
* PCMK__ATTRD_CMD_PEER_REMOVE, PCMK__ATTRD_CMD_REFRESH,
* PCMK__ATTRD_CMD_FLUSH, PCMK__ATTRD_CMD_SYNC,
* PCMK__ATTRD_CMD_SYNC_RESPONSE
* 1 1.1.13 PCMK__ATTRD_CMD_UPDATE (with PCMK__XA_ATTR_PATTERN),
* PCMK__ATTRD_CMD_QUERY
* 1 1.1.15 PCMK__ATTRD_CMD_UPDATE_BOTH,
* PCMK__ATTRD_CMD_UPDATE_DELAY
* 2 1.1.17 PCMK__ATTRD_CMD_CLEAR_FAILURE
* 3 2.1.1 PCMK__ATTRD_CMD_SYNC_RESPONSE indicates remote nodes
* 4 2.1.5 Multiple attributes can be updated in a single IPC
* message
* 5 2.1.5 Peers can request confirmation of a sent message
*/
#define ATTRD_PROTOCOL_VERSION "5"
#define ATTRD_SUPPORTS_MULTI_MESSAGE(x) ((x) >= 4)
#define ATTRD_SUPPORTS_CONFIRMATION(x) ((x) >= 5)
#define attrd_send_ack(client, id, flags) \
pcmk__ipc_send_ack((client), (id), (flags), "ack", ATTRD_PROTOCOL_VERSION, CRM_EX_INDETERMINATE)
void attrd_init_mainloop(void);
void attrd_run_mainloop(void);
void attrd_set_requesting_shutdown(void);
void attrd_clear_requesting_shutdown(void);
void attrd_free_waitlist(void);
bool attrd_requesting_shutdown(void);
bool attrd_shutting_down(void);
void attrd_shutdown(int nsig);
void attrd_init_ipc(void);
void attrd_ipc_fini(void);
void attrd_cib_disconnect(void);
bool attrd_value_needs_expansion(const char *value);
int attrd_expand_value(const char *value, const char *old_value);
/* regular expression to clear failures of all resources */
#define ATTRD_RE_CLEAR_ALL \
"^(" PCMK__FAIL_COUNT_PREFIX "|" PCMK__LAST_FAILURE_PREFIX ")-"
/* regular expression to clear failure of all operations for one resource
* (format takes resource name)
*
* @COMPAT attributes set < 1.1.17:
* also match older attributes that do not have the operation part
*/
#define ATTRD_RE_CLEAR_ONE ATTRD_RE_CLEAR_ALL "%s(#.+_[0-9]+)?$"
/* regular expression to clear failure of one operation for one resource
* (format takes resource name, operation name, and interval)
*
* @COMPAT attributes set < 1.1.17:
* also match older attributes that do not have the operation part
*/
#define ATTRD_RE_CLEAR_OP ATTRD_RE_CLEAR_ALL "%s(#%s_%u)?$"
int attrd_failure_regex(regex_t *regex, const char *rsc, const char *op,
guint interval_ms);
extern cib_t *the_cib;
/* Alerts */
extern lrmd_t *the_lrmd;
extern crm_trigger_t *attrd_config_read;
void attrd_lrmd_disconnect(void);
gboolean attrd_read_options(gpointer user_data);
void attrd_cib_replaced_cb(const char *event, xmlNode * msg);
void attrd_cib_updated_cb(const char *event, xmlNode *msg);
int attrd_send_attribute_alert(const char *node, int nodeid,
const char *attr, const char *value);
// Elections
void attrd_election_init(void);
void attrd_election_fini(void);
void attrd_start_election_if_needed(void);
bool attrd_election_won(void);
void attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml);
bool attrd_check_for_new_writer(const crm_node_t *peer, const xmlNode *xml);
void attrd_declare_winner(void);
void attrd_remove_voter(const crm_node_t *peer);
void attrd_xml_add_writer(xmlNode *xml);
typedef struct attribute_s {
char *uuid; /* TODO: Remove if at all possible */
char *id;
- char *set;
+ char *set_id;
+ char *set_type;
GHashTable *values;
int update;
int timeout_ms;
/* TODO: refactor these three as a bitmask */
bool changed; /* whether attribute value has changed since last write */
bool unknown_peer_uuids; /* whether we know we're missing a peer uuid */
gboolean is_private; /* whether to keep this attribute out of the CIB */
mainloop_timer_t *timer;
char *user;
gboolean force_write; /* Flag for updating attribute by ignoring delay */
} attribute_t;
typedef struct attribute_value_s {
uint32_t nodeid;
gboolean is_remote;
char *nodename;
char *current;
char *requested;
gboolean seen;
} attribute_value_t;
extern crm_cluster_t *attrd_cluster;
extern GHashTable *attributes;
extern GHashTable *peer_protocol_vers;
#define CIB_OP_TIMEOUT_S 120
int attrd_cluster_connect(void);
void attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host,
bool filter);
void attrd_peer_sync(crm_node_t *peer, xmlNode *xml);
void attrd_peer_remove(const char *host, bool uncache, const char *source);
void attrd_peer_clear_failure(pcmk__request_t *request);
void attrd_peer_sync_response(const crm_node_t *peer, bool peer_won,
xmlNode *xml);
void attrd_broadcast_protocol(void);
xmlNode *attrd_client_peer_remove(pcmk__request_t *request);
xmlNode *attrd_client_clear_failure(pcmk__request_t *request);
xmlNode *attrd_client_update(pcmk__request_t *request);
xmlNode *attrd_client_refresh(pcmk__request_t *request);
xmlNode *attrd_client_query(pcmk__request_t *request);
gboolean attrd_send_message(crm_node_t *node, xmlNode *data, bool confirm);
xmlNode *attrd_add_value_xml(xmlNode *parent, const attribute_t *a,
const attribute_value_t *v, bool force_write);
void attrd_clear_value_seen(void);
void attrd_free_attribute(gpointer data);
void attrd_free_attribute_value(gpointer data);
attribute_t *attrd_populate_attribute(xmlNode *xml, const char *attr);
void attrd_write_attribute(attribute_t *a, bool ignore_delay);
void attrd_write_attributes(bool all, bool ignore_delay);
void attrd_write_or_elect_attribute(attribute_t *a);
extern int minimum_protocol_version;
void attrd_remove_peer_protocol_ver(const char *host);
void attrd_update_minimum_protocol_ver(const char *host, const char *value);
mainloop_timer_t *attrd_add_timer(const char *id, int timeout_ms, attribute_t *attr);
void attrd_unregister_handlers(void);
void attrd_handle_request(pcmk__request_t *request);
enum attrd_sync_point {
attrd_sync_point_local,
attrd_sync_point_cluster,
};
typedef int (*attrd_confirmation_action_fn)(xmlNode *);
void attrd_add_client_to_waitlist(pcmk__request_t *request);
void attrd_ack_waitlist_clients(enum attrd_sync_point sync_point, const xmlNode *xml);
int attrd_cluster_sync_point_update(xmlNode *xml);
void attrd_do_not_expect_from_peer(const char *host);
void attrd_do_not_wait_for_client(pcmk__client_t *client);
void attrd_expect_confirmations(pcmk__request_t *request, attrd_confirmation_action_fn fn);
void attrd_free_confirmations(void);
void attrd_handle_confirmation(int callid, const char *host);
void attrd_remove_client_from_waitlist(pcmk__client_t *client);
const char *attrd_request_sync_point(xmlNode *xml);
bool attrd_request_has_sync_point(xmlNode *xml);
#endif /* PACEMAKER_ATTRD__H */
diff --git a/doc/sphinx/Pacemaker_Explained/utilization.rst b/doc/sphinx/Pacemaker_Explained/utilization.rst
index fe18fac213..93c67cdf31 100644
--- a/doc/sphinx/Pacemaker_Explained/utilization.rst
+++ b/doc/sphinx/Pacemaker_Explained/utilization.rst
@@ -1,249 +1,264 @@
.. _utilization:
Utilization and Placement Strategy
----------------------------------
Pacemaker decides where to place a resource according to the resource
allocation scores on every node. The resource will be allocated to the
node where the resource has the highest score.
If the resource allocation scores on all the nodes are equal, then with the
default placement strategy, Pacemaker will choose the node with the least
number of allocated resources, to balance the load. If the number of resources
on each node is equal, the first eligible node listed in the CIB will be chosen
to run the resource.
Often, in real-world situations, different resources use significantly
different proportions of a node's capacities (memory, I/O, etc.).
We cannot balance the load ideally just according to the number of resources
allocated to a node. Moreover, if resources are placed such that their combined
requirements exceed the provided capacity, they may fail to start entirely or
may run with degraded performance.
To take these factors into account, Pacemaker allows you to configure:
#. The capacity a certain node provides.
#. The capacity a certain resource requires.
#. An overall strategy for placement of resources.
Utilization attributes
######################
To configure the capacity that a node provides or a resource requires,
you can use *utilization attributes* in ``node`` and ``resource`` objects.
You can name utilization attributes according to your preferences and define as
many name/value pairs as your configuration needs. However, the attributes'
values must be integers.
-.. topic: Specifying CPU and RAM capacities of two nodes
+.. topic:: Specifying CPU and RAM capacities of two nodes

   .. code-block:: xml

      <node id="node1" type="normal" uname="node1">
        <utilization id="node1-utilization">
          <nvpair id="node1-utilization-cpu" name="cpu" value="2"/>
          <nvpair id="node1-utilization-memory" name="memory" value="2048"/>
        </utilization>
      </node>
      <node id="node2" type="normal" uname="node2">
        <utilization id="node2-utilization">
          <nvpair id="node2-utilization-cpu" name="cpu" value="4"/>
          <nvpair id="node2-utilization-memory" name="memory" value="4096"/>
        </utilization>
      </node>

.. topic:: Specifying CPU and RAM consumed by several resources

   .. code-block:: xml

      <primitive id="rsc-small" class="ocf" provider="pacemaker" type="Dummy">
        <utilization id="rsc-small-utilization">
          <nvpair id="rsc-small-utilization-cpu" name="cpu" value="1"/>
          <nvpair id="rsc-small-utilization-memory" name="memory" value="1024"/>
        </utilization>
      </primitive>
      <primitive id="rsc-medium" class="ocf" provider="pacemaker" type="Dummy">
        <utilization id="rsc-medium-utilization">
          <nvpair id="rsc-medium-utilization-cpu" name="cpu" value="2"/>
          <nvpair id="rsc-medium-utilization-memory" name="memory" value="2048"/>
        </utilization>
      </primitive>
      <primitive id="rsc-large" class="ocf" provider="pacemaker" type="Dummy">
        <utilization id="rsc-large-utilization">
          <nvpair id="rsc-large-utilization-cpu" name="cpu" value="3"/>
          <nvpair id="rsc-large-utilization-memory" name="memory" value="3072"/>
        </utilization>
      </primitive>

A node is considered eligible for a resource if it has sufficient free
capacity to satisfy the resource's requirements. The nature of the required
or provided capacities is completely irrelevant to Pacemaker -- it just makes
sure that all capacity requirements of a resource are satisfied before placing
a resource on a node.
+Utilization attributes used on a node object can also be *transient* *(since 2.1.6)*.
+These attributes are added to a ``transient_attributes`` section for the node
+and are forgotten by the cluster when the node goes offline. The ``attrd_updater``
+tool can be used to set these attributes.
+
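+For example, assuming ``attrd_updater`` gains a ``--utilization`` option as
+part of this feature (an assumption in this sketch), a transient utilization
+attribute could be set like this:
+
+.. code-block:: none
+
+   # attrd_updater --node cluster-1 --name cpu --update 1 --utilization
+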
+.. topic:: Transient utilization attribute for node cluster-1
+
+   .. code-block:: xml
+
+      <transient_attributes id="cluster-1">
+        <utilization id="status-cluster-1">
+          <nvpair id="status-cluster-1-cpu" name="cpu" value="1"/>
+        </utilization>
+      </transient_attributes>
+
.. note::
Utilization is supported for bundles *(since 2.1.3)*, but only for bundles
with an inner primitive. Any resource utilization values should be specified
for the inner primitive, but any priority meta-attribute should be specified
for the outer bundle.
Placement Strategy
##################
After you have configured the capacities your nodes provide and the
capacities your resources require, you need to set the ``placement-strategy``
in the global cluster options, otherwise the capacity configurations have
*no effect*.
Four values are available for the ``placement-strategy``:
* **default**
Utilization values are not taken into account at all.
Resources are allocated according to allocation scores. If scores are equal,
resources are evenly distributed across nodes.
* **utilization**
Utilization values are taken into account *only* when deciding whether a node
is considered eligible (i.e. whether it has sufficient free capacity to satisfy
the resource's requirements). Load-balancing is still done based on the
number of resources allocated to a node.
* **balanced**
Utilization values are taken into account when deciding whether a node
is eligible to serve a resource *and* when load-balancing, so an attempt is
made to spread the resources in a way that optimizes resource performance.
* **minimal**
Utilization values are taken into account *only* when deciding whether a node
is eligible to serve a resource. For load-balancing, an attempt is made to
concentrate the resources on as few nodes as possible, thereby enabling
possible power savings on the remaining nodes.
Set ``placement-strategy`` with ``crm_attribute``:
.. code-block:: none
# crm_attribute --name placement-strategy --update balanced
Now Pacemaker will ensure the load from your resources will be distributed
evenly throughout the cluster, without the need for convoluted sets of
colocation constraints.
Allocation Details
##################
Which node is preferred to get consumed first when allocating resources?
________________________________________________________________________
* The node with the highest node weight gets consumed first. Node weight
is a score maintained by the cluster to represent node health.
* If multiple nodes have the same node weight:
* If ``placement-strategy`` is ``default`` or ``utilization``,
the node that has the least number of allocated resources gets consumed first.
* If their numbers of allocated resources are equal,
the first eligible node listed in the CIB gets consumed first.
* If ``placement-strategy`` is ``balanced``,
the node that has the most free capacity gets consumed first.
* If the free capacities of the nodes are equal,
the node that has the least number of allocated resources gets consumed first.
* If their numbers of allocated resources are equal,
the first eligible node listed in the CIB gets consumed first.
* If ``placement-strategy`` is ``minimal``,
the first eligible node listed in the CIB gets consumed first.
Which node has more free capacity?
__________________________________
If only one type of utilization attribute has been defined, free capacity
is a simple numeric comparison.
If multiple types of utilization attributes have been defined, then
the node that is numerically highest in the most attribute types
has the most free capacity. For example:
* If ``nodeA`` has more free ``cpus``, and ``nodeB`` has more free ``memory``,
then their free capacities are equal.
* If ``nodeA`` has more free ``cpus``, while ``nodeB`` has more free ``memory``
and ``storage``, then ``nodeB`` has more free capacity.
Which resource is preferred to be assigned first?
_________________________________________________
* The resource that has the highest ``priority`` (see :ref:`resource_options`) gets
allocated first.
* If their priorities are equal, check whether they are already running. The
resource that has the highest score on the node where it's running gets allocated
first, to prevent resource shuffling.
* If the scores above are equal or the resources are not running, the resource
that has the highest score on the preferred node gets allocated first.
* If the scores above are equal, the first runnable resource listed in the CIB
gets allocated first.
Limitations and Workarounds
###########################
The type of problem Pacemaker is dealing with here is known as the
`knapsack problem <https://en.wikipedia.org/wiki/Knapsack_problem>`_ and falls into
the `NP-complete <https://en.wikipedia.org/wiki/NP-completeness>`_ category of computer
science problems -- a fancy way of saying "it takes a really long time
to solve".
Clearly in an HA cluster, it's not acceptable to spend minutes, let alone hours
or days, finding an optimal solution while services remain unavailable.
So instead of trying to solve the problem completely, Pacemaker uses a
*best effort* algorithm to determine which node should host a particular
service. This means it arrives at a solution much faster than traditional
linear programming algorithms, but at the price of potentially leaving some
services stopped.
In the contrived example at the start of this chapter:
* ``rsc-small`` would be allocated to ``node1``
* ``rsc-medium`` would be allocated to ``node2``
* ``rsc-large`` would remain inactive
This is not ideal.
There are various approaches to dealing with the limitations of
Pacemaker's placement strategy:
* **Ensure you have sufficient physical capacity.**
It might sound obvious, but if the physical capacity of your nodes is (close to)
maxed out by the cluster under normal conditions, then failover isn't going to
go well. Even without the utilization feature, you'll start hitting timeouts and
getting secondary failures.
* **Build some buffer into the capabilities advertised by the nodes.**
Advertise slightly more resources than you physically have, on the (usually valid)
assumption that a resource will not use 100% of the configured amount of
CPU, memory, and so forth *all* the time. This practice is sometimes called *overcommit*.
* **Specify resource priorities.**
If the cluster is going to sacrifice services, it should be the ones you care
about (comparatively) the least. Ensure that resource priorities are properly set
so that your most important resources are scheduled first, as in the example below.
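
For example (a sketch; ``my-important-rsc`` and the priority value are
placeholders to substitute with your own):

.. code-block:: none

   # crm_resource --resource my-important-rsc --set-parameter priority \
     --meta --parameter-value 10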
diff --git a/include/crm/common/attrd_internal.h b/include/crm/common/attrd_internal.h
index 389be48570..9af5f8c392 100644
--- a/include/crm/common/attrd_internal.h
+++ b/include/crm/common/attrd_internal.h
@@ -1,48 +1,49 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef PCMK__ATTRD_INTERNAL__H
# define PCMK__ATTRD_INTERNAL__H
#ifdef __cplusplus
extern "C" {
#endif
// Options for clients to use with functions below
enum pcmk__node_attr_opts {
pcmk__node_attr_none = 0,
pcmk__node_attr_remote = (1 << 0),
pcmk__node_attr_private = (1 << 1),
pcmk__node_attr_pattern = (1 << 2),
pcmk__node_attr_value = (1 << 3),
pcmk__node_attr_delay = (1 << 4),
pcmk__node_attr_perm = (1 << 5),
pcmk__node_attr_sync_local = (1 << 6),
pcmk__node_attr_sync_cluster = (1 << 7),
+ pcmk__node_attr_utilization = (1 << 8),
};
#define pcmk__set_node_attr_flags(node_attr_flags, flags_to_set) do { \
node_attr_flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Node attribute", crm_system_name, \
(node_attr_flags), (flags_to_set), #flags_to_set); \
} while (0)
#define pcmk__clear_node_attr_flags(node_attr_flags, flags_to_clear) do { \
node_attr_flags = pcmk__clear_flags_as(__func__, __LINE__, \
LOG_TRACE, "Node attribute", crm_system_name, \
(node_attr_flags), (flags_to_clear), #flags_to_clear); \
} while (0)
const char *pcmk__node_attr_target(const char *name);
#ifdef __cplusplus
}
#endif
#endif
diff --git a/include/crm_internal.h b/include/crm_internal.h
index f60e7b4532..b2708abc15 100644
--- a/include/crm_internal.h
+++ b/include/crm_internal.h
@@ -1,115 +1,116 @@
/*
* Copyright 2006-2021 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef CRM_INTERNAL__H
# define CRM_INTERNAL__H
# ifndef PCMK__CONFIG_H
# define PCMK__CONFIG_H
# include
# endif
# include
/* Our minimum glib dependency is 2.42. Define that as both the minimum and
* maximum glib APIs that are allowed (i.e. APIs that were already deprecated
* in 2.42, and APIs introduced after 2.42, cannot be used by Pacemaker code).
*/
#define GLIB_VERSION_MIN_REQUIRED GLIB_VERSION_2_42
#define GLIB_VERSION_MAX_ALLOWED GLIB_VERSION_2_42
# include
# include
# include
/* Public API headers can guard including deprecated API headers with this
* symbol, thus preventing internal code (which includes this header) from using
* deprecated APIs, while still allowing external code to use them by default.
*/
#define PCMK_ALLOW_DEPRECATED 0
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
#define N_(String) (String)
#ifdef ENABLE_NLS
# define _(String) gettext(String)
#else
# define _(String) (String)
#endif
/*
* XML attribute names used only by internal code
*/
#define PCMK__XA_ATTR_DAMPENING "attr_dampening"
#define PCMK__XA_ATTR_FORCE "attrd_is_force_write"
#define PCMK__XA_ATTR_INTERVAL "attr_clear_interval"
#define PCMK__XA_ATTR_IS_PRIVATE "attr_is_private"
#define PCMK__XA_ATTR_IS_REMOTE "attr_is_remote"
#define PCMK__XA_ATTR_NAME "attr_name"
#define PCMK__XA_ATTR_NODE_ID "attr_host_id"
#define PCMK__XA_ATTR_NODE_NAME "attr_host"
#define PCMK__XA_ATTR_OPERATION "attr_clear_operation"
#define PCMK__XA_ATTR_PATTERN "attr_regex"
#define PCMK__XA_ATTR_RESOURCE "attr_resource"
#define PCMK__XA_ATTR_SECTION "attr_section"
#define PCMK__XA_ATTR_SET "attr_set"
+#define PCMK__XA_ATTR_SET_TYPE "attr_set_type"
#define PCMK__XA_ATTR_SYNC_POINT "attr_sync_point"
#define PCMK__XA_ATTR_USER "attr_user"
#define PCMK__XA_ATTR_UUID "attr_key"
#define PCMK__XA_ATTR_VALUE "attr_value"
#define PCMK__XA_ATTR_VERSION "attr_version"
#define PCMK__XA_ATTR_WRITER "attr_writer"
#define PCMK__XA_CONFIG_ERRORS "config-errors"
#define PCMK__XA_CONFIG_WARNINGS "config-warnings"
#define PCMK__XA_CONFIRM "confirm"
#define PCMK__XA_GRAPH_ERRORS "graph-errors"
#define PCMK__XA_GRAPH_WARNINGS "graph-warnings"
#define PCMK__XA_MODE "mode"
#define PCMK__XA_TASK "task"
/*
* IPC service names that are only used internally
*/
# define PCMK__SERVER_BASED_RO "cib_ro"
# define PCMK__SERVER_BASED_RW "cib_rw"
# define PCMK__SERVER_BASED_SHM "cib_shm"
/*
* IPC commands that can be sent to Pacemaker daemons
*/
#define PCMK__ATTRD_CMD_PEER_REMOVE "peer-remove"
#define PCMK__ATTRD_CMD_UPDATE "update"
#define PCMK__ATTRD_CMD_UPDATE_BOTH "update-both"
#define PCMK__ATTRD_CMD_UPDATE_DELAY "update-delay"
#define PCMK__ATTRD_CMD_QUERY "query"
#define PCMK__ATTRD_CMD_REFRESH "refresh"
#define PCMK__ATTRD_CMD_FLUSH "flush"
#define PCMK__ATTRD_CMD_SYNC "sync"
#define PCMK__ATTRD_CMD_SYNC_RESPONSE "sync-response"
#define PCMK__ATTRD_CMD_CLEAR_FAILURE "clear-failure"
#define PCMK__ATTRD_CMD_CONFIRM "confirm"
#define PCMK__CONTROLD_CMD_NODES "list-nodes"
#endif /* CRM_INTERNAL__H */
diff --git a/lib/common/ipc_attrd.c b/lib/common/ipc_attrd.c
index 460650994c..00e2aff745 100644
--- a/lib/common/ipc_attrd.c
+++ b/lib/common/ipc_attrd.c
@@ -1,580 +1,586 @@
/*
* Copyright 2011-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include
#include
#include
#include
#include
#include
#include
#include "crmcommon_private.h"
static void
set_pairs_data(pcmk__attrd_api_reply_t *data, xmlNode *msg_data)
{
const char *name = NULL;
pcmk__attrd_query_pair_t *pair;
name = crm_element_value(msg_data, PCMK__XA_ATTR_NAME);
for (xmlNode *node = first_named_child(msg_data, XML_CIB_TAG_NODE);
node != NULL; node = crm_next_same_xml(node)) {
pair = calloc(1, sizeof(pcmk__attrd_query_pair_t));
CRM_ASSERT(pair != NULL);
pair->node = crm_element_value(node, PCMK__XA_ATTR_NODE_NAME);
pair->name = name;
pair->value = crm_element_value(node, PCMK__XA_ATTR_VALUE);
data->data.pairs = g_list_prepend(data->data.pairs, pair);
}
}
static bool
reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
{
const char *command = crm_element_value(request, PCMK__XA_TASK);
return pcmk__str_any_of(command,
PCMK__ATTRD_CMD_CLEAR_FAILURE,
PCMK__ATTRD_CMD_QUERY,
PCMK__ATTRD_CMD_REFRESH,
PCMK__ATTRD_CMD_UPDATE,
PCMK__ATTRD_CMD_UPDATE_BOTH,
PCMK__ATTRD_CMD_UPDATE_DELAY,
NULL);
}
static bool
dispatch(pcmk_ipc_api_t *api, xmlNode *reply)
{
const char *value = NULL;
crm_exit_t status = CRM_EX_OK;
pcmk__attrd_api_reply_t reply_data = {
pcmk__attrd_reply_unknown
};
if (pcmk__str_eq((const char *) reply->name, "ack", pcmk__str_none)) {
return false;
}
/* Do some basic validation of the reply */
value = crm_element_value(reply, F_TYPE);
if (pcmk__str_empty(value)
|| !pcmk__str_eq(value, T_ATTRD, pcmk__str_none)) {
crm_info("Unrecognizable message from attribute manager: "
"message type '%s' not '" T_ATTRD "'", pcmk__s(value, ""));
status = CRM_EX_PROTOCOL;
goto done;
}
value = crm_element_value(reply, F_SUBTYPE);
/* Only the query command gets a reply for now. NULL counts as query for
* backward compatibility with attribute managers <2.1.3 that didn't set it.
*/
if (pcmk__str_eq(value, PCMK__ATTRD_CMD_QUERY, pcmk__str_null_matches)) {
if (!xmlHasProp(reply, (pcmkXmlStr) PCMK__XA_ATTR_NAME)) {
status = ENXIO; // Most likely, the attribute doesn't exist
goto done;
}
reply_data.reply_type = pcmk__attrd_reply_query;
set_pairs_data(&reply_data, reply);
} else {
crm_info("Unrecognizable message from attribute manager: "
"message subtype '%s' unknown", pcmk__s(value, ""));
status = CRM_EX_PROTOCOL;
goto done;
}
done:
pcmk__call_ipc_callback(api, pcmk_ipc_event_reply, status, &reply_data);
/* Free any reply data that was allocated */
if (reply_data.data.pairs) {
g_list_free_full(reply_data.data.pairs, free);
}
return false;
}
pcmk__ipc_methods_t *
pcmk__attrd_api_methods(void)
{
pcmk__ipc_methods_t *cmds = calloc(1, sizeof(pcmk__ipc_methods_t));
if (cmds != NULL) {
cmds->new_data = NULL;
cmds->free_data = NULL;
cmds->post_connect = NULL;
cmds->reply_expected = reply_expected;
cmds->dispatch = dispatch;
}
return cmds;
}
/*!
* \internal
* \brief Create a generic pacemaker-attrd operation
*
* \param[in] user_name If not NULL, ACL user to set for operation
*
* \return XML of pacemaker-attrd operation
*/
static xmlNode *
create_attrd_op(const char *user_name)
{
xmlNode *attrd_op = create_xml_node(NULL, __func__);
crm_xml_add(attrd_op, F_TYPE, T_ATTRD);
crm_xml_add(attrd_op, F_ORIG, (crm_system_name? crm_system_name: "unknown"));
crm_xml_add(attrd_op, PCMK__XA_ATTR_USER, user_name);
return attrd_op;
}
static int
create_api(pcmk_ipc_api_t **api)
{
int rc = pcmk_new_ipc_api(api, pcmk_ipc_attrd);
if (rc != pcmk_rc_ok) {
crm_err("Could not connect to attrd: %s", pcmk_rc_str(rc));
}
return rc;
}
static void
destroy_api(pcmk_ipc_api_t *api)
{
pcmk_disconnect_ipc(api);
pcmk_free_ipc_api(api);
api = NULL;
}
static int
connect_and_send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request)
{
int rc = pcmk_rc_ok;
int max = 5;
while (max > 0) {
crm_info("Connecting to cluster... %d retries remaining", max);
rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync);
if (rc == pcmk_rc_ok) {
rc = pcmk__send_ipc_request(api, request);
break;
} else if (rc == EAGAIN || rc == EALREADY) {
sleep(5 - max);
max--;
} else {
crm_err("Could not connect to attrd: %s", pcmk_rc_str(rc));
break;
}
}
return rc;
}
static int
send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request)
{
return pcmk__send_ipc_request(api, request);
}
int
pcmk__attrd_api_clear_failures(pcmk_ipc_api_t *api, const char *node,
const char *resource, const char *operation,
const char *interval_spec, const char *user_name,
uint32_t options)
{
int rc = pcmk_rc_ok;
xmlNode *request = create_attrd_op(user_name);
const char *interval_desc = NULL;
const char *op_desc = NULL;
const char *target = pcmk__node_attr_target(node);
if (target != NULL) {
node = target;
}
crm_xml_add(request, PCMK__XA_TASK, PCMK__ATTRD_CMD_CLEAR_FAILURE);
pcmk__xe_add_node(request, node, 0);
crm_xml_add(request, PCMK__XA_ATTR_RESOURCE, resource);
crm_xml_add(request, PCMK__XA_ATTR_OPERATION, operation);
crm_xml_add(request, PCMK__XA_ATTR_INTERVAL, interval_spec);
crm_xml_add_int(request, PCMK__XA_ATTR_IS_REMOTE,
pcmk_is_set(options, pcmk__node_attr_remote));
if (api == NULL) {
rc = create_api(&api);
if (rc != pcmk_rc_ok) {
return rc;
}
rc = connect_and_send_attrd_request(api, request);
destroy_api(api);
} else if (!pcmk_ipc_is_connected(api)) {
rc = connect_and_send_attrd_request(api, request);
} else {
rc = send_attrd_request(api, request);
}
free_xml(request);
if (operation) {
interval_desc = interval_spec? interval_spec : "nonrecurring";
op_desc = operation;
} else {
interval_desc = "all";
op_desc = "operations";
}
crm_debug("Asked pacemaker-attrd to clear failure of %s %s for %s on %s: %s (%d)",
interval_desc, op_desc, (resource? resource : "all resources"),
(node? node : "all nodes"), pcmk_rc_str(rc), rc);
return rc;
}
int
pcmk__attrd_api_delete(pcmk_ipc_api_t *api, const char *node, const char *name,
uint32_t options)
{
const char *target = NULL;
if (name == NULL) {
return EINVAL;
}
target = pcmk__node_attr_target(node);
if (target != NULL) {
node = target;
}
/* Make sure the right update option is set. */
options &= ~pcmk__node_attr_delay;
options |= pcmk__node_attr_value;
return pcmk__attrd_api_update(api, node, name, NULL, NULL, NULL, NULL, options);
}
int
pcmk__attrd_api_purge(pcmk_ipc_api_t *api, const char *node)
{
int rc = pcmk_rc_ok;
xmlNode *request = NULL;
const char *display_host = (node ? node : "localhost");
const char *target = pcmk__node_attr_target(node);
if (target != NULL) {
node = target;
}
request = create_attrd_op(NULL);
crm_xml_add(request, PCMK__XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE);
pcmk__xe_add_node(request, node, 0);
if (api == NULL) {
rc = create_api(&api);
if (rc != pcmk_rc_ok) {
return rc;
}
rc = connect_and_send_attrd_request(api, request);
destroy_api(api);
} else if (!pcmk_ipc_is_connected(api)) {
rc = connect_and_send_attrd_request(api, request);
} else {
rc = send_attrd_request(api, request);
}
free_xml(request);
crm_debug("Asked pacemaker-attrd to purge %s: %s (%d)",
display_host, pcmk_rc_str(rc), rc);
return rc;
}
int
pcmk__attrd_api_query(pcmk_ipc_api_t *api, const char *node, const char *name,
uint32_t options)
{
int rc = pcmk_rc_ok;
xmlNode *request = NULL;
const char *target = NULL;
if (name == NULL) {
return EINVAL;
}
target = pcmk__node_attr_target(node);
if (target != NULL) {
node = target;
}
request = create_attrd_op(NULL);
crm_xml_add(request, PCMK__XA_ATTR_NAME, name);
crm_xml_add(request, PCMK__XA_TASK, PCMK__ATTRD_CMD_QUERY);
pcmk__xe_add_node(request, node, 0);
rc = send_attrd_request(api, request);
free_xml(request);
if (node) {
crm_debug("Queried pacemaker-attrd for %s on %s: %s (%d)",
name, node, pcmk_rc_str(rc), rc);
} else {
crm_debug("Queried pacemaker-attrd for %s: %s (%d)",
name, pcmk_rc_str(rc), rc);
}
return rc;
}
int
pcmk__attrd_api_refresh(pcmk_ipc_api_t *api, const char *node)
{
int rc = pcmk_rc_ok;
xmlNode *request = NULL;
const char *display_host = (node ? node : "localhost");
const char *target = pcmk__node_attr_target(node);
if (target != NULL) {
node = target;
}
request = create_attrd_op(NULL);
crm_xml_add(request, PCMK__XA_TASK, PCMK__ATTRD_CMD_REFRESH);
pcmk__xe_add_node(request, node, 0);
if (api == NULL) {
rc = create_api(&api);
if (rc != pcmk_rc_ok) {
return rc;
}
rc = connect_and_send_attrd_request(api, request);
destroy_api(api);
} else if (!pcmk_ipc_is_connected(api)) {
rc = connect_and_send_attrd_request(api, request);
} else {
rc = send_attrd_request(api, request);
}
free_xml(request);
crm_debug("Asked pacemaker-attrd to refresh %s: %s (%d)",
display_host, pcmk_rc_str(rc), rc);
return rc;
}
static void
add_op_attr(xmlNode *op, uint32_t options)
{
if (pcmk_all_flags_set(options, pcmk__node_attr_value | pcmk__node_attr_delay)) {
crm_xml_add(op, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE_BOTH);
} else if (pcmk_is_set(options, pcmk__node_attr_value)) {
crm_xml_add(op, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE);
} else if (pcmk_is_set(options, pcmk__node_attr_delay)) {
crm_xml_add(op, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE_DELAY);
}
}
static void
populate_update_op(xmlNode *op, const char *node, const char *name, const char *value,
const char *dampen, const char *set, uint32_t options)
{
if (pcmk_is_set(options, pcmk__node_attr_pattern)) {
crm_xml_add(op, PCMK__XA_ATTR_PATTERN, name);
} else {
crm_xml_add(op, PCMK__XA_ATTR_NAME, name);
}
+ if (pcmk_is_set(options, pcmk__node_attr_utilization)) {
+ crm_xml_add(op, PCMK__XA_ATTR_SET_TYPE, XML_TAG_UTILIZATION);
+ } else {
+ crm_xml_add(op, PCMK__XA_ATTR_SET_TYPE, XML_TAG_ATTR_SETS);
+ }
+
add_op_attr(op, options);
crm_xml_add(op, PCMK__XA_ATTR_VALUE, value);
crm_xml_add(op, PCMK__XA_ATTR_DAMPENING, dampen);
pcmk__xe_add_node(op, node, 0);
crm_xml_add(op, PCMK__XA_ATTR_SET, set);
crm_xml_add_int(op, PCMK__XA_ATTR_IS_REMOTE,
pcmk_is_set(options, pcmk__node_attr_remote));
crm_xml_add_int(op, PCMK__XA_ATTR_IS_PRIVATE,
pcmk_is_set(options, pcmk__node_attr_private));
if (pcmk_is_set(options, pcmk__node_attr_sync_local)) {
crm_xml_add(op, PCMK__XA_ATTR_SYNC_POINT, PCMK__VALUE_LOCAL);
} else if (pcmk_is_set(options, pcmk__node_attr_sync_cluster)) {
crm_xml_add(op, PCMK__XA_ATTR_SYNC_POINT, PCMK__VALUE_CLUSTER);
}
}
int
pcmk__attrd_api_update(pcmk_ipc_api_t *api, const char *node, const char *name,
const char *value, const char *dampen, const char *set,
const char *user_name, uint32_t options)
{
int rc = pcmk_rc_ok;
xmlNode *request = NULL;
const char *display_host = (node ? node : "localhost");
const char *target = NULL;
if (name == NULL) {
return EINVAL;
}
target = pcmk__node_attr_target(node);
if (target != NULL) {
node = target;
}
request = create_attrd_op(user_name);
populate_update_op(request, node, name, value, dampen, set, options);
if (api == NULL) {
rc = create_api(&api);
if (rc != pcmk_rc_ok) {
return rc;
}
rc = connect_and_send_attrd_request(api, request);
destroy_api(api);
} else if (!pcmk_ipc_is_connected(api)) {
rc = connect_and_send_attrd_request(api, request);
} else {
rc = send_attrd_request(api, request);
}
free_xml(request);
crm_debug("Asked pacemaker-attrd to update %s on %s: %s (%d)",
name, display_host, pcmk_rc_str(rc), rc);
return rc;
}
int
pcmk__attrd_api_update_list(pcmk_ipc_api_t *api, GList *attrs, const char *dampen,
const char *set, const char *user_name,
uint32_t options)
{
int rc = pcmk_rc_ok;
xmlNode *request = NULL;
if (attrs == NULL) {
return EINVAL;
}
/* There are two different ways of handling a list of attributes:
*
* (1) For messages originating from some command line tool, we have to send
* them one at a time. In this loop, we just call pcmk__attrd_api_update
* for each, letting it deal with creating the API object if it doesn't
* already exist.
*
* The reason we can't use a single message in this case is that we can't
* trust that the server supports it. Remote nodes could be involved
* here, and there's no guarantee that a newer client running on a remote
* node is talking to (or proxied through) a cluster node with a newer
 * attrd. We also can't just try sending a single message and then fall
 * back on multiple: there is no handshake with the attrd server to
 * determine its version, the fallback would have to happen in the
 * dispatch function to work for all connection types (mainloop in
 * particular), and by that point we would no longer have the original
 * message to break apart and resend as individual messages.
*
* (2) For messages between daemons, we can be assured that the local attrd
* will support the new message and that it can send to the other attrds
* as one request or split up according to the minimum supported version.
*/
for (GList *iter = attrs; iter != NULL; iter = iter->next) {
pcmk__attrd_query_pair_t *pair = (pcmk__attrd_query_pair_t *) iter->data;
if (pcmk__is_daemon) {
const char *target = NULL;
xmlNode *child = NULL;
/* First time through this loop - create the basic request. */
if (request == NULL) {
request = create_attrd_op(user_name);
add_op_attr(request, options);
}
            /* Add a child node for this operation. We add the task to the
             * top-level XML node so attrd_ipc_dispatch doesn't need changes,
             * and we also add the task to each child node in
             * populate_update_op so attrd_client_update knows what form of
             * update is taking place.
             */
child = create_xml_node(request, XML_ATTR_OP);
target = pcmk__node_attr_target(pair->node);
if (target != NULL) {
pair->node = target;
}
populate_update_op(child, pair->node, pair->name, pair->value, dampen,
set, options);
} else {
rc = pcmk__attrd_api_update(api, pair->node, pair->name, pair->value,
dampen, set, user_name, options);
}
}
/* If we were doing multiple attributes at once, we still need to send the
* request. Do that now, creating and destroying the API object if needed.
*/
if (pcmk__is_daemon) {
bool created_api = false;
if (api == NULL) {
rc = create_api(&api);
if (rc != pcmk_rc_ok) {
return rc;
}
created_api = true;
}
rc = connect_and_send_attrd_request(api, request);
free_xml(request);
if (created_api) {
destroy_api(api);
}
}
return rc;
}
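/* A minimal caller sketch (hypothetical values, assuming
 * pcmk__attrd_query_pair_t exposes node/name/value members):
 *
 *   pcmk__attrd_query_pair_t pair = {
 *       .node = "node1", .name = "cpu", .value = "4",
 *   };
 *   GList *attrs = g_list_prepend(NULL, &pair);
 *   int rc = pcmk__attrd_api_update_list(NULL, attrs, NULL, NULL, NULL,
 *                                        pcmk__node_attr_value);
 *   g_list_free(attrs);
 */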
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 3e82e32bd0..a9959eff5c 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,4397 +1,4389 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
#include <crm_internal.h>
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/services.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/util.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <pe_status_private.h>
CRM_TRACE_INIT_DATA(pe_status);
/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
* use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
* flag is stringified more readably in log messages.
*/
#define set_config_flag(data_set, option, flag) do { \
const char *scf_value = pe_pref((data_set)->config_hash, (option)); \
if (scf_value != NULL) { \
if (crm_is_true(scf_value)) { \
(data_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
LOG_TRACE, "Working set", \
crm_system_name, (data_set)->flags, \
(flag), #flag); \
} else { \
(data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\
LOG_TRACE, "Working set", \
crm_system_name, (data_set)->flags, \
(flag), #flag); \
} \
} \
} while(0)
static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
xmlNode **last_failure,
enum action_fail_response *failed,
pe_working_set_t *data_set);
static void determine_remote_online_status(pe_working_set_t *data_set,
pe_node_t *this_node);
static void add_node_attrs(xmlNode *attrs, pe_node_t *node, bool overwrite,
pe_working_set_t *data_set);
static void determine_online_status(xmlNode *node_state, pe_node_t *this_node,
pe_working_set_t *data_set);
static void unpack_node_lrm(pe_node_t *node, xmlNode *xml,
pe_working_set_t *data_set);
// Bitmask for warnings we only want to print once
uint32_t pe_wo = 0;
static gboolean
is_dangling_guest_node(pe_node_t *node)
{
    /* We are looking for a remote node that was supposed to be mapped to a
     * container resource, but all traces of that container have disappeared
     * from both the config and the status section. */
if (pe__is_guest_or_remote_node(node) &&
node->details->remote_rsc &&
node->details->remote_rsc->container == NULL &&
pcmk_is_set(node->details->remote_rsc->flags,
pe_rsc_orphan_container_filler)) {
return TRUE;
}
return FALSE;
}
/*!
* \brief Schedule a fence action for a node
*
* \param[in,out] data_set Current working set of cluster
* \param[in,out] node Node to fence
* \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider `priority-fencing-delay`
*/
void
pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (pe__is_guest_node(node)) {
pe_resource_t *rsc = node->details->remote_rsc->container;
if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) {
if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
pe__node_name(node), reason, rsc->id);
} else {
crm_warn("Guest node %s will be fenced "
"(by recovering its guest resource %s): %s",
pe__node_name(node), rsc->id, reason);
/* We don't mark the node as unclean because that would prevent the
* node from running resources. We want to allow it to run resources
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
}
}
} else if (is_dangling_guest_node(node)) {
crm_info("Cleaning up dangling connection for guest node %s: "
"fencing was already done because %s, "
"and guest resource no longer exists",
pe__node_name(node), reason);
pe__set_resource_flags(node->details->remote_rsc,
pe_rsc_failed|pe_rsc_stop);
} else if (pe__is_remote_node(node)) {
pe_resource_t *rsc = node->details->remote_rsc;
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pe__node_name(node), reason);
} else if(node->details->remote_requires_reset == FALSE) {
node->details->remote_requires_reset = TRUE;
crm_warn("Remote node %s %s: %s",
pe__node_name(node),
pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply `priority-fencing-delay` for remote nodes
pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pe__node_name(node),
pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean",
reason);
} else {
crm_warn("Cluster node %s %s: %s",
pe__node_name(node),
pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set);
}
}
// @TODO xpaths can't handle templates, rules, or id-refs
// nvpair with provides or requires set to unfencing
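// e.g. (a sketch, assuming the usual spellings of the name constants):
//   <nvpair id="..." name="requires" value="unfencing"/>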
#define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR \
"[(@" XML_NVPAIR_ATTR_NAME "='" PCMK_STONITH_PROVIDES "'" \
"or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \
"and @" XML_NVPAIR_ATTR_VALUE "='" PCMK__VALUE_UNFENCING "']"
// unfencing in rsc_defaults or any resource
#define XPATH_ENABLE_UNFENCING \
"/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES \
"//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR \
"|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG \
"/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR
static void
set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set)
{
xmlXPathObjectPtr result = NULL;
if (!pcmk_is_set(data_set->flags, flag)) {
result = xpath_search(data_set->input, xpath);
if (result && (numXpathResults(result) > 0)) {
pe__set_working_set_flags(data_set, flag);
}
freeXpathObject(result);
}
}
gboolean
unpack_config(xmlNode * config, pe_working_set_t * data_set)
{
const char *value = NULL;
GHashTable *config_hash = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
data_set->config_hash = config_hash;
pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
CIB_OPTIONS_FIRST, FALSE, data_set);
verify_pe_options(data_set->config_hash);
set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
crm_info("Startup probes: disabled (dangerous)");
}
value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and stonith-watchdog-timeout is nonzero");
pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set);
value = pe_pref(data_set->config_hash, "stonith-timeout");
data_set->stonith_timeout = (int) crm_parse_interval_spec(value);
crm_debug("STONITH timeout: %d", data_set->stonith_timeout);
set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
crm_debug("STONITH of failed nodes is %s",
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
if (!strcmp(data_set->stonith_action, "poweroff")) {
pe_warn_once(pe_wo_poweroff,
"Support for stonith-action of 'poweroff' is deprecated "
"and will be removed in a future release (use 'off' instead)");
data_set->stonith_action = "off";
}
crm_trace("STONITH will %s nodes", data_set->stonith_action);
set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
crm_debug("Concurrent fencing is %s",
pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled");
value = pe_pref(data_set->config_hash,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY);
if (value) {
data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000;
crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay);
}
set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
crm_debug("Stop all active resources: %s",
pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
        crm_debug("Cluster is symmetric - resources can run anywhere by default");
}
value = pe_pref(data_set->config_hash, "no-quorum-policy");
if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
data_set->no_quorum_policy = no_quorum_ignore;
} else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) {
data_set->no_quorum_policy = no_quorum_freeze;
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
data_set->no_quorum_policy = no_quorum_demote;
} else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) {
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
int do_panic = 0;
crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC,
&do_panic);
if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
data_set->no_quorum_policy = no_quorum_suicide;
} else {
crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum");
data_set->no_quorum_policy = no_quorum_stop;
}
} else {
pcmk__config_err("Resetting no-quorum-policy to 'stop' because "
"fencing is disabled");
data_set->no_quorum_policy = no_quorum_stop;
}
} else {
data_set->no_quorum_policy = no_quorum_stop;
}
switch (data_set->no_quorum_policy) {
case no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
case no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
case no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
case no_quorum_suicide:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
case no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
crm_trace("Orphan resources are %s",
pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? "stopped" : "ignored");
set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
crm_trace("Orphan resource actions are %s",
pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored");
value = pe_pref(data_set->config_hash, "remove-after-stop");
if (value != NULL) {
if (crm_is_true(value)) {
pe__set_working_set_flags(data_set, pe_flag_remove_after_stop);
#ifndef PCMK__COMPAT_2_0
pe_warn_once(pe_wo_remove_after,
"Support for the remove-after-stop cluster property is"
" deprecated and will be removed in a future release");
#endif
} else {
pe__clear_working_set_flags(data_set, pe_flag_remove_after_stop);
}
}
set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
crm_trace("Maintenance mode: %s",
pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
crm_trace("Start failures are %s",
pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount");
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing);
}
if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes");
}
pe__unpack_node_health_scores(data_set);
data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
crm_trace("Placement strategy: %s", data_set->placement_strategy);
set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock);
crm_trace("Resources will%s be locked to cleanly shut down nodes",
(pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not"));
if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
value = pe_pref(data_set->config_hash,
XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT);
data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000;
crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock);
}
return TRUE;
}
pe_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
const char *score, pe_working_set_t * data_set)
{
pe_node_t *new_node = NULL;
if (pe_find_node(data_set->nodes, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
new_node = calloc(1, sizeof(pe_node_t));
if (new_node == NULL) {
return NULL;
}
new_node->weight = char2score(score);
new_node->details = calloc(1, sizeof(struct pe_node_shared_s));
if (new_node->details == NULL) {
free(new_node);
return NULL;
}
crm_trace("Creating node for entry %s/%s", uname, id);
new_node->details->id = id;
new_node->details->uname = uname;
new_node->details->online = FALSE;
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
new_node->details->data_set = data_set;
if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) {
new_node->details->type = node_member;
} else if (pcmk__str_eq(type, "remote", pcmk__str_casei)) {
new_node->details->type = node_remote;
pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes);
} else {
/* @COMPAT 'ping' is the default for backward compatibility, but it
* should be changed to 'member' at a compatibility break
*/
if (!pcmk__str_eq(type, "ping", pcmk__str_casei)) {
pcmk__config_warn("Node %s has unrecognized type '%s', "
"assuming 'ping'", pcmk__s(uname, "without name"),
type);
}
pe_warn_once(pe_wo_ping_node,
"Support for nodes of type 'ping' (such as %s) is "
"deprecated and will be removed in a future release",
pcmk__s(uname, "unnamed node"));
new_node->details->type = node_ping;
}
new_node->details->attrs = pcmk__strkey_table(free, free);
if (pe__is_guest_or_remote_node(new_node)) {
g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
strdup("remote"));
} else {
g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
strdup("cluster"));
}
new_node->details->utilization = pcmk__strkey_table(free, free);
new_node->details->digest_cache = pcmk__strkey_table(free,
pe__free_digests);
data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node,
pe__cmp_node_name);
return new_node;
}
static const char *
expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
const char *container_id = ID(xml_obj);
const char *remote_name = NULL;
const char *remote_server = NULL;
const char *remote_port = NULL;
const char *connect_timeout = "60s";
    const char *remote_allow_migrate = NULL;
const char *is_managed = NULL;
for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL;
attr_set = pcmk__xe_next(attr_set)) {
if (!pcmk__str_eq((const char *)attr_set->name, XML_TAG_META_SETS,
pcmk__str_casei)) {
continue;
}
for (attr = pcmk__xe_first_child(attr_set); attr != NULL;
attr = pcmk__xe_next(attr)) {
const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
if (pcmk__str_eq(name, XML_RSC_ATTR_REMOTE_NODE, pcmk__str_casei)) {
remote_name = value;
} else if (pcmk__str_eq(name, "remote-addr", pcmk__str_casei)) {
remote_server = value;
} else if (pcmk__str_eq(name, "remote-port", pcmk__str_casei)) {
remote_port = value;
} else if (pcmk__str_eq(name, "remote-connect-timeout", pcmk__str_casei)) {
connect_timeout = value;
} else if (pcmk__str_eq(name, "remote-allow-migrate", pcmk__str_casei)) {
                remote_allow_migrate = value;
} else if (pcmk__str_eq(name, XML_RSC_ATTR_MANAGED, pcmk__str_casei)) {
is_managed = value;
}
}
}
if (remote_name == NULL) {
return NULL;
}
if (pe_find_resource(data->resources, remote_name) != NULL) {
return NULL;
}
pe_create_remote_xml(parent, remote_name, container_id,
remote_allow_migrate, is_managed,
connect_timeout, remote_server, remote_port);
return remote_name;
}
static void
handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
{
if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
*/
return;
}
if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
} else {
// Blind faith ...
new_node->details->unclean = FALSE;
}
    /* We need to be able to determine whether a node's status section exists,
     * separately from whether the node is unclean. */
new_node->details->unseen = TRUE;
}
gboolean
unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
pe_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
const char *score = NULL;
- pe_rule_eval_data_t rule_data = {
- .node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
- .match_data = NULL,
- .rsc_data = NULL,
- .op_data = NULL
- };
-
for (xml_obj = pcmk__xe_first_child(xml_nodes); xml_obj != NULL;
xml_obj = pcmk__xe_next(xml_obj)) {
if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, pcmk__str_none)) {
new_node = NULL;
id = crm_element_value(xml_obj, XML_ATTR_ID);
uname = crm_element_value(xml_obj, XML_ATTR_UNAME);
type = crm_element_value(xml_obj, XML_ATTR_TYPE);
score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
crm_trace("Processing node %s/%s", uname, id);
if (id == NULL) {
pcmk__config_err("Ignoring <" XML_CIB_TAG_NODE
"> entry in configuration without id");
continue;
}
new_node = pe_create_node(id, uname, type, score, data_set);
if (new_node == NULL) {
return FALSE;
}
handle_startup_fencing(data_set, new_node);
add_node_attrs(xml_obj, new_node, FALSE, data_set);
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
- new_node->details->utilization, NULL,
- FALSE, data_set);
crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
}
}
if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
crm_info("Creating a fake local node");
        pe_create_node(data_set->localhost, data_set->localhost, NULL, NULL,
data_set);
}
return TRUE;
}
static void
setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
{
const char *container_id = NULL;
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) setup_container, data_set);
return;
}
container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
pe_resource_t *container = pe_find_resource(data_set->resources, container_id);
if (container) {
rsc->container = container;
pe__set_resource_flags(container, pe_rsc_is_container);
container->fillers = g_list_append(container->fillers, rsc);
pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
} else {
pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id);
}
}
}
gboolean
unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
/* Create remote nodes and guest nodes from the resource configuration
* before unpacking resources.
*/
for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
xml_obj = pcmk__xe_next(xml_obj)) {
const char *new_node_id = NULL;
/* Check for remote nodes, which are defined by ocf:pacemaker:remote
* primitives.
*/
if (xml_contains_remote_node(xml_obj)) {
new_node_id = ID(xml_obj);
/* The "pe_find_node" check is here to make sure we don't iterate over
* an expanded node that has already been added to the node list. */
if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
data_set);
}
continue;
}
/* Check for guest nodes, which are defined by special meta-attributes
* of a primitive of any type (for example, VirtualDomain or Xen).
*/
if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
/* This will add an ocf:pacemaker:remote primitive to the
* configuration for the guest node's connection, to be unpacked
* later.
*/
new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
crm_trace("Found guest node %s in resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
data_set);
}
continue;
}
/* Check for guest nodes inside a group. Clones are currently not
* supported as guest nodes.
*/
if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, pcmk__str_none)) {
xmlNode *xml_obj2 = NULL;
for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
xml_obj2 = pcmk__xe_next(xml_obj2)) {
new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, ID(xml_obj2), ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
data_set);
}
}
}
}
return TRUE;
}
/* Call this after all the nodes and resources have been
* unpacked, but before the status section is read.
*
* A remote node's online status is reflected by the state
* of the remote node's connection resource. We need to link
* the remote node to this connection resource so we can have
* easy access to the connection resource during the scheduler calculations.
*/
static void
link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
{
pe_node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
remote_node = pe_find_node(data_set->nodes, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pe_rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
new_rsc->id, pe__node_name(remote_node));
remote_node->details->remote_rsc = new_rsc;
if (new_rsc->container == NULL) {
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
handle_startup_fencing(data_set, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
* that we know the node is a guest node, update it correctly.
*/
g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND),
strdup("container"));
}
}
static void
destroy_tag(gpointer data)
{
pe_tag_t *tag = data;
if (tag) {
free(tag->id);
g_list_free_full(tag->refs, free);
free(tag);
}
}
/*!
* \internal
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
* \param[in,out] data_set Where to put resource information
*
* \return TRUE
*
* \note unpack_remote_nodes() MUST be called before this, so that the nodes can
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
data_set->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
xml_obj = pcmk__xe_next(xml_obj)) {
pe_resource_t *new_rsc = NULL;
const char *id = ID(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
crm_element_name(xml_obj));
continue;
}
if (pcmk__str_eq((const char *) xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE,
pcmk__str_none)) {
if (g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
NULL, NULL) == FALSE) {
                /* Record the template's ID so we know it exists, even if
                 * nothing references it. */
g_hash_table_insert(data_set->template_rsc_sets, strdup(id), NULL);
}
continue;
}
crm_trace("Unpacking <%s id='%s'>", crm_element_name(xml_obj), id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
data_set) == pcmk_rc_ok) {
data_set->resources = g_list_append(data_set->resources, new_rsc);
pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
crm_element_name(xml_obj), id);
}
}
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
setup_container(rsc, data_set);
link_rsc2remotenode(data_set, rsc);
}
data_set->resources = g_list_sort(data_set->resources,
pe__cmp_rsc_priority);
if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
/* Ignore */
} else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
&& !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
}
return TRUE;
}
gboolean
unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
{
xmlNode *xml_tag = NULL;
data_set->tags = pcmk__strkey_table(free, destroy_tag);
for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
xml_tag = pcmk__xe_next(xml_tag)) {
xmlNode *xml_obj_ref = NULL;
const char *tag_id = ID(xml_tag);
if (!pcmk__str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, pcmk__str_none)) {
continue;
}
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
crm_element_name(xml_tag));
continue;
}
for (xml_obj_ref = pcmk__xe_first_child(xml_tag); xml_obj_ref != NULL;
xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {
const char *obj_ref = ID(xml_obj_ref);
if (!pcmk__str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, pcmk__str_none)) {
continue;
}
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
crm_element_name(xml_obj_ref), tag_id);
continue;
}
if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
}
return TRUE;
}
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
{
const char *ticket_id = NULL;
const char *granted = NULL;
const char *last_granted = NULL;
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
pe_ticket_t *ticket = NULL;
ticket_id = ID(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
return FALSE;
}
crm_trace("Processing ticket state for %s", ticket_id);
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = ticket_new(ticket_id, data_set);
if (ticket == NULL) {
return FALSE;
}
}
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(xml_ticket, prop_name);
if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
continue;
}
g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value));
}
granted = g_hash_table_lookup(ticket->state, "granted");
if (granted && crm_is_true(granted)) {
ticket->granted = TRUE;
crm_info("We have ticket '%s'", ticket->id);
} else {
ticket->granted = FALSE;
crm_info("We do not have ticket '%s'", ticket->id);
}
last_granted = g_hash_table_lookup(ticket->state, "last-granted");
if (last_granted) {
long long last_granted_ll;
pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
ticket->last_granted = (time_t) last_granted_ll;
}
standby = g_hash_table_lookup(ticket->state, "standby");
if (standby && crm_is_true(standby)) {
ticket->standby = TRUE;
if (ticket->granted) {
crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
}
} else {
ticket->standby = FALSE;
}
crm_trace("Done with ticket state for %s", ticket_id);
return TRUE;
}
static gboolean
unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
{
xmlNode *xml_obj = NULL;
for (xml_obj = pcmk__xe_first_child(xml_tickets); xml_obj != NULL;
xml_obj = pcmk__xe_next(xml_obj)) {
if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
continue;
}
unpack_ticket_state(xml_obj, data_set);
}
return TRUE;
}
static void
unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_t * data_set)
{
const char *resource_discovery_enabled = NULL;
xmlNode *attrs = NULL;
pe_resource_t *rsc = NULL;
if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
return;
}
if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) {
return;
}
crm_trace("Processing Pacemaker Remote node %s", pe__node_name(this_node));
pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_MAINTENANCE),
&(this_node->details->remote_maintenance), 0);
rsc = this_node->details->remote_rsc;
if (this_node->details->remote_requires_reset == FALSE) {
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
}
attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
add_node_attrs(attrs, this_node, TRUE, data_set);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pe__node_name(this_node));
this_node->details->shutdown = TRUE;
}
if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) {
crm_info("%s is in standby mode", pe__node_name(this_node));
this_node->details->standby = TRUE;
}
if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
crm_info("%s is in maintenance mode", pe__node_name(this_node));
this_node->details->maintenance = TRUE;
}
resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
if (pe__is_remote_node(this_node)
&& !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
crm_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
pe__node_name(this_node));
} else {
/* This is either a remote node with fencing enabled, or a guest
* node. We don't care whether fencing is enabled when fencing guest
* nodes, because they are "fenced" by recovering their containing
* resource.
*/
crm_info("%s has resource discovery disabled",
pe__node_name(this_node));
this_node->details->rsc_discovery_enabled = FALSE;
}
}
}
/*!
* \internal
* \brief Unpack a cluster node's transient attributes
*
* \param[in] state CIB node state XML
* \param[in] node Cluster node whose attributes are being unpacked
* \param[in] data_set Cluster working set
*/
static void
unpack_transient_attributes(xmlNode *state, pe_node_t *node,
pe_working_set_t *data_set)
{
const char *discovery = NULL;
xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
add_node_attrs(attrs, node, TRUE, data_set);
if (crm_is_true(pe_node_attribute_raw(node, "standby"))) {
crm_info("%s is in standby mode", pe__node_name(node));
node->details->standby = TRUE;
}
if (crm_is_true(pe_node_attribute_raw(node, "maintenance"))) {
crm_info("%s is in maintenance mode", pe__node_name(node));
node->details->maintenance = TRUE;
}
discovery = pe_node_attribute_raw(node, XML_NODE_ATTR_RSC_DISCOVERY);
if ((discovery != NULL) && !crm_is_true(discovery)) {
crm_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY
" attribute for %s because disabling resource discovery "
"is not allowed for cluster nodes", pe__node_name(node));
}
}
/*!
* \internal
* \brief Unpack a node state entry (first pass)
*
* Unpack one node state entry from status. This unpacks information from the
* node_state element itself and node attributes inside it, but not the
* resource history inside it. Multiple passes through the status are needed to
* fully unpack everything.
*
* \param[in] state CIB node state XML
* \param[in] data_set Cluster working set
*/
static void
unpack_node_state(xmlNode *state, pe_working_set_t *data_set)
{
const char *id = NULL;
const char *uname = NULL;
pe_node_t *this_node = NULL;
id = crm_element_value(state, XML_ATTR_ID);
if (id == NULL) {
crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
XML_ATTR_ID);
return;
}
uname = crm_element_value(state, XML_ATTR_UNAME);
if (uname == NULL) {
crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
XML_ATTR_UNAME);
return;
}
this_node = pe_find_node_any(data_set->nodes, id, uname);
if (this_node == NULL) {
pcmk__config_warn("Ignoring recorded node state for '%s' because "
"it is no longer in the configuration", uname);
return;
}
if (pe__is_guest_or_remote_node(this_node)) {
/* We can't determine the online status of Pacemaker Remote nodes until
* after all resource history has been unpacked. In this first pass, we
* do need to mark whether the node has been fenced, as this plays a
* role during unpacking cluster node resource state.
*/
pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_FENCED),
&(this_node->details->remote_was_fenced), 0);
return;
}
unpack_transient_attributes(state, this_node, data_set);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
*/
this_node->details->unclean = FALSE;
this_node->details->unseen = FALSE;
crm_trace("Determining online status of cluster node %s (id %s)",
pe__node_name(this_node), id);
determine_online_status(state, this_node, data_set);
if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
&& this_node->details->online
&& (data_set->no_quorum_policy == no_quorum_suicide)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
pe_fence_node(data_set, this_node, "cluster does not have quorum",
FALSE);
}
}
/*!
* \internal
* \brief Unpack nodes' resource history as much as possible
*
* Unpack as many nodes' resource history as possible in one pass through the
* status. We need to process Pacemaker Remote nodes' connections/containers
* before unpacking their history; the connection/container history will be
* in another node's history, so it might take multiple passes to unpack
* everything.
*
* \param[in] status CIB XML status section
* \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
* \param[in] data_set Cluster working set
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(xmlNode *status, bool fence, pe_working_set_t *data_set)
{
int rc = pcmk_rc_ok;
// Loop through all node_state entries in CIB status
for (xmlNode *state = first_named_child(status, XML_CIB_TAG_STATE);
state != NULL; state = crm_next_same_xml(state)) {
const char *id = ID(state);
const char *uname = crm_element_value(state, XML_ATTR_UNAME);
pe_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history from malformed "
XML_CIB_TAG_STATE " without id and/or uname");
continue;
}
this_node = pe_find_node_any(data_set->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
"no longer in configuration", id);
continue;
}
if (this_node->details->unpacked) {
crm_trace("Not unpacking resource history for node %s because "
"already unpacked", id);
continue;
}
if (fence) {
// We're processing all remaining nodes
} else if (pe__is_guest_node(this_node)) {
/* We can unpack a guest node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
pe_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED)
|| (rsc->container->role != RSC_ROLE_STARTED)) {
crm_trace("Not unpacking resource history for guest node %s "
"because container and connection are not known to "
"be up", id);
continue;
}
} else if (pe__is_remote_node(this_node)) {
/* We can unpack a remote node's history only after we've unpacked
* other resource history to the point that we know that the node's
* connection is up, with the exception of when shutdown locks are
* in use.
*/
pe_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL)
|| (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)
&& (rsc->role != RSC_ROLE_STARTED))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
}
/* If fencing and shutdown locks are disabled and we're not processing
* unseen nodes, then we don't want to unpack offline nodes until online
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
} else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled
|pe_flag_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
continue;
}
if (pe__is_guest_or_remote_node(this_node)) {
determine_remote_online_status(data_set, this_node);
unpack_handle_remote_attrs(this_node, state, data_set);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
this_node->details->unpacked = TRUE;
unpack_node_lrm(this_node, state, data_set);
rc = EAGAIN; // Other node histories might depend on this one
}
return rc;
}
/* remove nodes that are down or stopping */
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
unpack_status(xmlNode * status, pe_working_set_t * data_set)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
if (data_set->tickets == NULL) {
data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status); state != NULL;
state = pcmk__xe_next(state)) {
if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
unpack_tickets_state((xmlNode *) state, data_set);
} else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
unpack_node_state(state, data_set);
}
}
while (unpack_node_history(status, FALSE, data_set) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
data_set);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
if (data_set->stop_needed != NULL) {
for (GList *item = data_set->stop_needed; item; item = item->next) {
pe_resource_t *container = item->data;
pe_node_t *node = pe__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
g_list_free(data_set->stop_needed);
data_set->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
pe_node_t *this_node = gIter->data;
if (!pe__is_guest_or_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->details->remote_rsc != NULL)) {
pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED,
"remote shutdown");
}
if (!this_node->details->unpacked) {
determine_remote_online_status(data_set, this_node);
}
}
return TRUE;
}
static gboolean
determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state,
pe_node_t * this_node)
{
gboolean online = FALSE;
const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
if (!crm_is_true(in_cluster)) {
        crm_trace("Node is down: in_cluster=%s",
                  pcmk__s(in_cluster, "<null>"));
} else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
crm_debug("Node is not ready to run resources: %s", join);
}
} else if (this_node->details->expected_up == FALSE) {
crm_trace("Controller is down: "
"in_cluster=%s is_peer=%s join=%s expected=%s",
                  pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
                  pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
crm_info("in_cluster=%s is_peer=%s join=%s expected=%s",
                 pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
                 pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
static gboolean
determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state,
pe_node_t * this_node)
{
gboolean online = FALSE;
gboolean do_terminate = FALSE;
bool crmd_online = FALSE;
const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
const char *terminate = pe_node_attribute_raw(this_node, "terminate");
/*
- XML_NODE_IN_CLUSTER ::= true|false
- XML_NODE_IS_PEER ::= online|offline
- XML_NODE_JOIN_STATE ::= member|down|pending|banned
- XML_NODE_EXPECTED ::= member|down
*/
if (crm_is_true(terminate)) {
do_terminate = TRUE;
} else if (terminate != NULL && strlen(terminate) > 0) {
/* could be a time() value */
char t = terminate[0];
if (t != '0' && isdigit(t)) {
do_terminate = TRUE;
}
}
crm_trace("%s: in_cluster=%s is_peer=%s join=%s expected=%s term=%d",
              pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
              pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
              pcmk__s(exp_state, "<null>"), do_terminate);
online = crm_is_true(in_cluster);
crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
if (exp_state == NULL) {
exp_state = CRMD_JOINSTATE_DOWN;
}
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pe__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
online = crmd_online;
} else if (in_cluster == NULL) {
pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);
} else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
pe_fence_node(data_set, this_node,
"peer failed Pacemaker membership criteria", FALSE);
} else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {
if (crm_is_true(in_cluster) || crmd_online) {
crm_info("- %s is not ready to run resources",
pe__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
crm_trace("%s is down or still coming up",
pe__node_name(this_node));
}
} else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
&& crm_is_true(in_cluster) == FALSE && !crmd_online) {
crm_info("%s was just shot", pe__node_name(this_node));
online = FALSE;
} else if (crm_is_true(in_cluster) == FALSE) {
// Consider `priority-fencing-delay` for lost nodes
pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);
} else if (!crmd_online) {
pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
} else if (do_terminate) {
pe_fence_node(data_set, this_node, "termination was requested", FALSE);
} else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
crm_info("%s is active", pe__node_name(this_node));
} else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources", pe__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
crm_warn("%s: in-cluster=%s is-peer=%s join=%s expected=%s term=%d shutdown=%d",
                 pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
                 pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
                 pcmk__s(exp_state, "<null>"), do_terminate,
this_node->details->shutdown);
}
return online;
}
static void
determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
{
pe_resource_t *rsc = this_node->details->remote_rsc;
pe_resource_t *container = NULL;
pe_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
* be NULL. Consider it an offline remote node in that case.
*/
if (rsc == NULL) {
this_node->details->online = FALSE;
goto remote_online_done;
}
container = rsc->container;
if (container && pcmk__list_of_1(rsc->running_on)) {
host = rsc->running_on->data;
}
    /* If the connection resource is currently started, mark the node online. */
if (rsc->role == RSC_ROLE_STARTED) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
if(container && pcmk_is_set(container->flags, pe_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
} else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
} else if (rsc->role == RSC_ROLE_STOPPED
|| (container && container->role == RSC_ROLE_STOPPED)) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = FALSE;
} else if (host && (host->details->online == FALSE)
&& host->details->unclean) {
crm_trace("Guest node %s UNCLEAN because host is unclean",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
}
remote_online_done:
crm_trace("Remote node %s online=%s",
this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
}
static void
determine_online_status(xmlNode * node_state, pe_node_t * this_node, pe_working_set_t * data_set)
{
gboolean online = FALSE;
const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
CRM_CHECK(this_node != NULL, return);
this_node->details->shutdown = FALSE;
this_node->details->expected_up = FALSE;
if (pe__shutdown_requested(this_node)) {
this_node->details->shutdown = TRUE;
} else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
this_node->details->expected_up = TRUE;
}
if (this_node->details->type == node_ping) {
this_node->details->unclean = FALSE;
online = FALSE; /* As far as resource management is concerned,
* the node is safely offline.
* Anyone caught abusing this logic will be shot
*/
} else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
online = determine_online_status_no_fencing(data_set, node_state, this_node);
} else {
online = determine_online_status_fencing(data_set, node_state, this_node);
}
if (online) {
this_node->details->online = TRUE;
} else {
/* remove node from contention */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -INFINITY;
}
if (online && this_node->details->shutdown) {
/* don't run resources here */
this_node->fixed = TRUE; // @COMPAT deprecated and unused
this_node->weight = -INFINITY;
}
if (this_node->details->type == node_ping) {
crm_info("%s is not a Pacemaker node", pe__node_name(this_node));
} else if (this_node->details->unclean) {
pe_proc_warn("%s is unclean", pe__node_name(this_node));
} else if (this_node->details->online) {
crm_info("%s is %s", pe__node_name(this_node),
this_node->details->shutdown ? "shutting down" :
this_node->details->pending ? "pending" :
this_node->details->standby ? "standby" :
this_node->details->maintenance ? "maintenance" : "online");
} else {
crm_trace("%s is offline", pe__node_name(this_node));
}
}
/*!
* \internal
* \brief Find the end of a resource's name, excluding any clone suffix
*
* \param[in] id Resource ID to check
*
* \return Pointer to last character of resource's base name
*/
const char *
pe_base_name_end(const char *id)
{
if (!pcmk__str_empty(id)) {
const char *end = id + strlen(id) - 1;
for (const char *s = end; s > id; --s) {
switch (*s) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
break;
case ':':
return (s == end)? s : (s - 1);
default:
return end;
}
}
return end;
}
return NULL;
}
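/* For example, pe_base_name_end("myclone:10") points at the final 'e' of
 * "myclone", while pe_base_name_end("myrsc") points at its final 'c'.
 */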
/*!
* \internal
* \brief Get a resource name excluding any clone suffix
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_strip(const char *last_rsc_id)
{
const char *end = pe_base_name_end(last_rsc_id);
char *basename = NULL;
CRM_ASSERT(end);
basename = strndup(last_rsc_id, end - last_rsc_id + 1);
CRM_ASSERT(basename);
return basename;
}
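/* For example, clone_strip("myclone:10") returns "myclone", and an ID with no
 * clone suffix is returned unchanged (as a newly allocated copy).
 */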
/*!
* \internal
* \brief Get the name of the first instance of a cloned resource
*
* \param[in] last_rsc_id Resource ID to check
*
* \return Pointer to newly allocated string with resource's base name plus :0
* \note It is the caller's responsibility to free() the result.
* This asserts on error, so callers can assume result is not NULL.
*/
char *
clone_zero(const char *last_rsc_id)
{
    const char *end = pe_base_name_end(last_rsc_id);
    size_t base_name_len = 0;
    char *zero = NULL;
    CRM_ASSERT(end);
    // Do the pointer arithmetic only after verifying end is non-NULL
    base_name_len = end - last_rsc_id + 1;
    zero = calloc(base_name_len + 3, sizeof(char));
CRM_ASSERT(zero);
memcpy(zero, last_rsc_id, base_name_len);
zero[base_name_len] = ':';
zero[base_name_len + 1] = '0';
return zero;
}
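/* For example, clone_zero("myclone:10") and clone_zero("myclone") both return
 * "myclone:0".
 */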
static pe_resource_t *
create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set)
{
pe_resource_t *rsc = NULL;
xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
copy_in_properties(xml_rsc, rsc_entry);
crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
if (pe__unpack_resource(xml_rsc, &rsc, NULL, data_set) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
pe_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
node = pe_find_node(data_set->nodes, rsc_id);
if (node == NULL) {
node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
}
link_rsc2remotenode(data_set, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
node->details->shutdown = TRUE;
}
}
if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
}
pe__set_resource_flags(rsc, pe_rsc_orphan);
data_set->resources = g_list_append(data_set->resources, rsc);
return rsc;
}
/*!
* \internal
* \brief Create orphan instance for anonymous clone resource history
*/
static pe_resource_t *
create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
pe_node_t *node, pe_working_set_t *data_set)
{
pe_resource_t *top = pe__create_clone_child(parent, data_set);
// find_rsc() because we might be a cloned group
pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pe__node_name(node));
return orphan;
}
/*!
* \internal
* \brief Check a node for an instance of an anonymous clone
*
* Return a child instance of the specified anonymous clone, in order of
* preference: (1) the instance running on the specified node, if any;
* (2) an inactive instance (i.e. within the total of clone-max instances);
* (3) a newly created orphan (i.e. clone-max instances are already active).
*
* \param[in] data_set Cluster information
* \param[in] node Node on which to check for instance
* \param[in] parent Clone to check
* \param[in] rsc_id Name of cloned resource in history (without instance)
*/
static pe_resource_t *
find_anonymous_clone(pe_working_set_t * data_set, pe_node_t * node, pe_resource_t * parent,
const char *rsc_id)
{
GList *rIter = NULL;
pe_resource_t *rsc = NULL;
pe_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(parent != NULL);
CRM_ASSERT(pe_rsc_is_clone(parent));
CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));
// Check for active (or partially active, for cloned groups) instance
pe_rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pe__node_name(node), parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GList *locations = NULL;
pe_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
* for a resource before the resource's individual operation history
* entries are unpacked, locations will generally not contain the
* desired node.
*
* However, there are three exceptions:
* (1) when child is a cloned group and we have already unpacked the
* history of another member of the group on the same node;
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if globally-unique
* was flipped from true to false); and
* (3) when we re-run calculations on the same data set as part of a
* simulation.
*/
child->fns->location(child, &locations, 2);
if (locations) {
/* We should never associate the same numbered anonymous clone
* instance with multiple nodes, and clone instances can't migrate,
* so there must be only one location, regardless of history.
*/
CRM_LOG_ASSERT(locations->next == NULL);
if (((pe_node_t *)locations->data)->details == node->details) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
* need the particular member corresponding to rsc_id.
*
* If the history entry is orphaned, rsc will be NULL.
*/
rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
* happen if globally-unique is switched from true to
* false), we want to consider the instances beyond the
* first as orphans, even if there are inactive instance
* numbers available.
*/
if (rsc->running_on) {
crm_notice("Active (now-)anonymous clone %s has "
"multiple (orphan) instance histories on %s",
parent->id, pe__node_name(node));
skip_inactive = TRUE;
rsc = NULL;
} else {
pe_rsc_trace(parent, "Resource %s, active", rsc->id);
}
}
}
g_list_free(locations);
} else {
pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
&& !pcmk_is_set(child->flags, pe_rsc_block)) {
// Remember one inactive instance in case we don't find active
inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
pe_find_clone);
/* ... but don't use it if it was already associated with a
* pending action on another node
*/
if (inactive_instance && inactive_instance->pending_node
&& (inactive_instance->pending_node->details != node->details)) {
inactive_instance = NULL;
}
}
}
}
if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id);
rsc = inactive_instance;
}
/* If the resource has "requires" set to "quorum" or "nothing", and we don't
* have a clone instance for every node, we don't want to consume a valid
* instance number for unclean nodes. Such instances may appear to be active
* according to the history, but should be considered inactive, so we can
* start an instance elsewhere. Treat such instances as orphans.
*
* An exception is instances running on guest nodes -- since guest node
* "fencing" is actually just a resource stop, requires shouldn't apply.
*
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pe__is_guest_node(node)
&& !pe__is_universal_clone(parent, data_set)) {
rsc = NULL;
}
if (rsc == NULL) {
rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
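/*!
 * \internal
 * \brief Find (or create, for anonymous clones) the resource for a history ID
 *
 * \param[in] data_set Cluster working set
 * \param[in] node Node that the resource history is for
 * \param[in] rsc_id Resource ID from the history entry
 * \param[in] rsc_entry Resource history XML
 *
 * \return Resource matching the history entry, or NULL if the history is for
 *         a resource that is no longer a primitive
 */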
static pe_resource_t *
unpack_find_resource(pe_working_set_t * data_set, pe_node_t * node, const char *rsc_id,
xmlNode * rsc_entry)
{
pe_resource_t *rsc = NULL;
pe_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
rsc = pe_find_resource(data_set->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
* check it again as a clone instance. Even when clone-max=0, we create
* a single :0 orphan to match against here.
*/
char *clone0_id = clone_zero(rsc_id);
pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);
if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
} else {
crm_trace("%s is not known as %s either (orphan)",
rsc_id, clone0_id);
}
free(clone0_id);
} else if (rsc->variant > pe_native) {
crm_trace("Resource history for %s is orphaned because it is no longer primitive",
rsc_id);
return NULL;
} else {
parent = uber_parent(rsc);
}
if (pe_rsc_is_anon_clone(parent)) {
if (pe_rsc_is_bundled(parent)) {
rsc = pe__find_bundle_replica(parent->parent, node);
} else {
char *base = clone_strip(rsc_id);
rsc = find_anonymous_clone(data_set, node, parent, base);
free(base);
CRM_ASSERT(rsc != NULL);
}
}
if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_casei)
&& !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_casei)) {
pcmk__str_update(&rsc->clone_name, rsc_id);
pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pe__node_name(node), rsc->id,
(pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
}
return rsc;
}
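/*!
 * \internal
 * \brief Create an orphan resource for a history entry with no configuration
 *
 * \param[in] rsc_entry Resource history XML
 * \param[in] node Node that the history entry is for
 * \param[in] data_set Cluster working set
 *
 * \return Newly created orphan, or NULL if it could not be created
 */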
static pe_resource_t *
process_orphan_resource(xmlNode * rsc_entry, pe_node_t * node, pe_working_set_t * data_set)
{
pe_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
crm_debug("Detected orphan resource %s on %s", rsc_id, pe__node_name(node));
rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
if (rsc == NULL) {
return NULL;
}
if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
pe__clear_resource_flags(rsc, pe_rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
}
return rsc;
}
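/*!
 * \internal
 * \brief Update cluster state to reflect a resource's known state on a node
 *
 * This schedules any fencing or recovery implied by the state and the
 * configured failure handling.
 *
 * \param[in,out] rsc Resource whose state is being processed
 * \param[in] node Node that the state is on
 * \param[in] on_fail Failure handling determined while unpacking history
 * \param[in] migrate_op Most recent migrate_from history entry, if any
 * \param[in] data_set Cluster working set
 */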
static void
process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
enum action_fail_response on_fail,
xmlNode * migrate_op, pe_working_set_t * data_set)
{
pe_node_t *tmpnode = NULL;
char *reason = NULL;
enum action_fail_response save_on_fail = action_fail_ignore;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
rsc->id, role2text(rsc->role), pe__node_name(node),
fail2text(on_fail));
/* process current state */
if (rsc->role != RSC_ROLE_UNKNOWN) {
pe_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
pe_node_t *n = pe__copy_node(node);
pe_rsc_trace(rsc, "%s%s%s known on %s",
rsc->id,
((rsc->clone_name == NULL)? "" : " also known as "),
((rsc->clone_name == NULL)? "" : rsc->clone_name),
pe__node_name(n));
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
break;
}
iter = iter->parent;
}
}
/* If a managed resource is believed to be running, but node is down ... */
if (rsc->role > RSC_ROLE_STOPPED
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
&& pcmk_is_set(rsc->flags, pe_rsc_managed)) {
gboolean should_fence = FALSE;
/* If this is a guest node, fence it (regardless of whether fencing is
* enabled, because guest node fencing is done by recovery of the
* container resource rather than by the fencer). Mark the resource
* we're processing as failed. When the guest comes back up, its
* operation history in the CIB will be cleared, freeing the affected
* resource to run again once we are sure we know its state.
*/
if (pe__is_guest_node(node)) {
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
should_fence = TRUE;
} else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
if (pe__is_remote_node(node) && node->details->remote_rsc
&& !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
* somewhere. This allows connection resources on a failed
* cluster node to move to another node without requiring the
* remote nodes to be fenced as well.
*/
node->details->unseen = TRUE;
reason = crm_strdup_printf("%s is active there (fencing will be"
" revoked if remote connection can "
"be re-established elsewhere)",
rsc->id);
}
should_fence = TRUE;
}
if (should_fence) {
if (reason == NULL) {
reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
}
pe_fence_node(data_set, node, reason, FALSE);
}
free(reason);
}
    /* In order to calculate priority_fencing_delay correctly, save the failure
     * information and pass it to native_add_running().
     */
save_on_fail = on_fail;
if (node->details->unclean) {
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
on_fail = action_fail_ignore;
}
switch (on_fail) {
case action_fail_ignore:
/* nothing to do */
break;
case action_fail_demote:
pe__set_resource_flags(rsc, pe_rsc_failed);
demote_action(rsc, node, FALSE);
break;
case action_fail_fence:
/* treat it as if it is still running
* but also mark the node as unclean
*/
reason = crm_strdup_printf("%s failed there", rsc->id);
pe_fence_node(data_set, node, reason, FALSE);
free(reason);
break;
case action_fail_standby:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
case action_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
pe__clear_resource_flags(rsc, pe_rsc_managed);
pe__set_resource_flags(rsc, pe_rsc_block);
break;
case action_fail_migrate:
/* make sure it comes up somewhere else
* or not at all
*/
resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set);
break;
case action_fail_stop:
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
break;
case action_fail_recover:
if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
stop_action(rsc, node, FALSE);
}
break;
case action_fail_restart_container:
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
if (rsc->container && pe_rsc_is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
* container is running yet, so remember it and add a stop
* action for it later.
*/
data_set->stop_needed = g_list_prepend(data_set->stop_needed,
rsc->container);
} else if (rsc->container) {
stop_action(rsc->container, node, FALSE);
} else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
stop_action(rsc, node, FALSE);
}
break;
case action_fail_reset_remote:
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pe_find_node(data_set->nodes, rsc->id);
}
if (tmpnode &&
pe__is_remote_node(tmpnode) &&
tmpnode->details->remote_was_fenced == 0) {
/* The remote connection resource failed in a way that
* should result in fencing the remote node.
*/
pe_fence_node(data_set, tmpnode,
"remote connection is unrecoverable", FALSE);
}
}
            /* Require the stop action regardless of whether fencing is occurring */
if (rsc->role > RSC_ROLE_STOPPED) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_ms) {
pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
}
break;
}
    /* Ensure a remote node connection failure forces an unclean remote node
     * to be fenced. By setting unseen = FALSE, the remote node failure will
     * result in a fencing operation regardless of whether we're going to
     * attempt to reconnect to the remote node in this transition.
     */
if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
tmpnode = pe_find_node(data_set->nodes, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
pcmk__config_warn("Detected active orphan %s running on %s",
rsc->id, pe__node_name(node));
} else {
pcmk__config_warn("Resource '%s' must be stopped manually on "
"%s because cluster is configured not to "
"stop active orphans",
rsc->id, pe__node_name(node));
}
}
native_add_running(rsc, node, data_set, (save_on_fail != action_fail_ignore));
switch (on_fail) {
case action_fail_ignore:
break;
case action_fail_demote:
case action_fail_block:
pe__set_resource_flags(rsc, pe_rsc_failed);
break;
default:
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
break;
}
} else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
/* Only do this for older status sections that included instance numbers
* Otherwise stopped instances will appear as orphans
*/
pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id);
free(rsc->clone_name);
rsc->clone_name = NULL;
} else {
GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
pe_action_t *stop = (pe_action_t *) gIter->data;
pe__set_action_flags(stop, pe_action_optional);
}
g_list_free(possible_matches);
}
/* A successful stop after migrate_to on the migration source doesn't make
* the partially migrated resource stopped on the migration target.
*/
if (rsc->role == RSC_ROLE_STOPPED
&& rsc->partial_migration_source
&& rsc->partial_migration_source->details == node->details
&& rsc->partial_migration_target
&& rsc->running_on) {
rsc->role = RSC_ROLE_STARTED;
}
}
/* create active recurring operations as optional */
static void
process_recurring(pe_node_t * node, pe_resource_t * rsc,
int start_index, int stop_index,
GList *sorted_op_list, pe_working_set_t * data_set)
{
int counter = -1;
const char *task = NULL;
const char *status = NULL;
GList *gIter = sorted_op_list;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index);
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
guint interval_ms = 0;
char *key = NULL;
const char *id = ID(rsc_op);
counter++;
if (node->details->online == FALSE) {
pe_rsc_trace(rsc, "Skipping %s on %s: node is offline",
rsc->id, pe__node_name(node));
break;
/* Need to check if there's a monitor for role="Stopped" */
} else if (start_index < stop_index && counter <= stop_index) {
pe_rsc_trace(rsc, "Skipping %s on %s: resource is not active",
id, pe__node_name(node));
continue;
} else if (counter < start_index) {
pe_rsc_trace(rsc, "Skipping %s on %s: old %d",
id, pe__node_name(node), counter);
continue;
}
crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if (interval_ms == 0) {
pe_rsc_trace(rsc, "Skipping %s on %s: non-recurring",
id, pe__node_name(node));
continue;
}
status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
pe_rsc_trace(rsc, "Skipping %s on %s: status",
id, pe__node_name(node));
continue;
}
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pe_rsc_trace(rsc, "Creating %s on %s", key, pe__node_name(node));
custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
}
}
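/*!
 * \brief Find the bounds of a resource's active history
 *
 * Scan a sorted operation history for the most recent successful stop, and
 * the most recent start (or an operation that implies one, such as a
 * successful promote, demote, or probe that found the resource running).
 *
 * \param[in] sorted_op_list Operation history, sorted by call ID
 * \param[out] start_index Where to store index of last start (or -1 if none)
 * \param[out] stop_index Where to store index of last successful stop
 *                        (or -1 if none)
 */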
void
calculate_active_ops(GList *sorted_op_list, int *start_index, int *stop_index)
{
int counter = -1;
int implied_monitor_start = -1;
int implied_clone_start = -1;
const char *task = NULL;
const char *status = NULL;
GList *gIter = sorted_op_list;
*stop_index = -1;
*start_index = -1;
for (; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
} else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) {
*start_index = counter;
} else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
} else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
if (*start_index == -1) {
if (implied_clone_start != -1) {
*start_index = implied_clone_start;
} else if (implied_monitor_start != -1) {
*start_index = implied_monitor_start;
}
}
}
// If resource history entry has shutdown lock, remember lock node and time
static void
unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
if ((data_set->shutdown_lock > 0)
&& (get_effective_time(data_set)
> (lock_time + data_set->shutdown_lock))) {
pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pe__node_name(node));
pe__clear_resource_history(rsc, node, data_set);
} else {
rsc->lock_node = node;
rsc->lock_time = lock_time;
}
}
}
/*!
* \internal
* \brief Unpack one lrm_resource entry from a node's CIB status
*
* \param[in] node Node whose status is being unpacked
 * \param[in] lrm_resource lrm_resource XML being unpacked
* \param[in] data_set Cluster working set
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
static pe_resource_t *
unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource,
pe_working_set_t *data_set)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
const char *task = NULL;
const char *rsc_id = ID(lrm_resource);
pe_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *migrate_op = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
enum action_fail_response on_fail = action_fail_ignore;
enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
if (rsc_id == NULL) {
crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE
" entry without id");
return NULL;
}
crm_trace("Unpacking " XML_LRM_TAG_RESOURCE " for %s on %s",
rsc_id, pe__node_name(node));
// Build a list of individual lrm_rsc_op entries, so we can sort them
for (rsc_op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP);
rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) {
op_list = g_list_prepend(op_list, rsc_op);
}
if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
}
}
/* find the resource */
rsc = unpack_find_resource(data_set, node, rsc_id, lrm_resource);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
rsc = process_orphan_resource(lrm_resource, node, data_set);
}
}
CRM_ASSERT(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
unpack_shutdown_lock(lrm_resource, rsc, node, data_set);
}
/* process operations */
saved_role = rsc->role;
rsc->role = RSC_ROLE_UNKNOWN;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
migrate_op = rsc_op;
}
unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set);
}
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
/* no need to free the contents */
g_list_free(sorted_op_list);
process_rsc_state(rsc, node, on_fail, migrate_op, data_set);
if (get_target_role(rsc, &req_role)) {
if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE);
} else if (req_role > rsc->next_role) {
pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s"
" with requested next role %s",
rsc->id, role2text(rsc->next_role), role2text(req_role));
}
}
if (saved_role > rsc->role) {
rsc->role = saved_role;
}
return rsc;
}
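/*!
 * \internal
 * \brief Map orphaned container fillers to their container resources
 *
 * \param[in] lrm_rsc_list lrm_resources XML for a node
 * \param[in] data_set Cluster working set
 */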
static void
handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set)
{
xmlNode *rsc_entry = NULL;
for (rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL;
rsc_entry = pcmk__xe_next(rsc_entry)) {
pe_resource_t *rsc;
pe_resource_t *container;
const char *rsc_id;
const char *container_id;
if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_casei)) {
continue;
}
container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER);
rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
if (container_id == NULL || rsc_id == NULL) {
continue;
}
container = pe_find_resource(data_set->resources, container_id);
if (container == NULL) {
continue;
}
rsc = pe_find_resource(data_set->resources, rsc_id);
if (rsc == NULL ||
!pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) ||
rsc->container != NULL) {
continue;
}
pe_rsc_trace(rsc, "Mapped container of orphaned resource %s to %s",
rsc->id, container_id);
rsc->container = container;
container->fillers = g_list_append(container->fillers, rsc);
}
}
/*!
* \internal
* \brief Unpack one node's lrm status section
*
* \param[in] node Node whose status is being unpacked
* \param[in] xml CIB node state XML
* \param[in] data_set Cluster working set
*/
static void
unpack_node_lrm(pe_node_t *node, xmlNode *xml, pe_working_set_t *data_set)
{
bool found_orphaned_container_filler = false;
// Drill down to lrm_resources section
xml = find_xml_node(xml, XML_CIB_TAG_LRM, FALSE);
if (xml == NULL) {
return;
}
xml = find_xml_node(xml, XML_LRM_TAG_RESOURCES, FALSE);
if (xml == NULL) {
return;
}
// Unpack each lrm_resource entry
for (xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set);
if ((rsc != NULL)
&& pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
found_orphaned_container_filler = true;
}
}
/* Now that all resource state has been unpacked for this node, map any
* orphaned container fillers to their container resource.
*/
if (found_orphaned_container_filler) {
handle_orphaned_container_fillers(xml, data_set);
}
}
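// Set a resource's role to started (or unpromoted, if within a promotable clone)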
static void
set_active(pe_resource_t * rsc)
{
pe_resource_t *top = uber_parent(rsc);
if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) {
rsc->role = RSC_ROLE_UNPROMOTED;
} else {
rsc->role = RSC_ROLE_STARTED;
}
}
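// GHFunc to set a node's weight to the score pointed to by user_data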
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
pe_node_t *node = value;
int *score = user_data;
node->weight = *score;
}
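/*!
 * \internal
 * \brief Find a resource's action history entry matching given criteria
 *
 * \param[in] resource Resource ID to match
 * \param[in] op Action name to match
 * \param[in] node Node name to match
 * \param[in] source For migration actions, the other node involved (the
 *                   target for migrate_to or the source for migrate_from);
 *                   NULL to not match on it
 * \param[in] target_rc If non-negative, return the entry only if it has this
 *                      return code and a "done" execution status
 * \param[in] data_set Cluster working set
 *
 * \return Matching lrm_rsc_op XML, or NULL if none
 */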
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
int target_rc, pe_working_set_t *data_set)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL),
return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
"//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='", node, "']"
"//" XML_LRM_TAG_RESOURCE
"[@" XML_ATTR_ID "='", resource, "']"
"/" XML_LRM_TAG_RSC_OP "[@" XML_LRM_ATTR_TASK "='", op, "'",
NULL);
/* Need to check against transition_magic too? */
if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATE) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_TARGET "='", source, "']",
NULL);
} else if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATED) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_SOURCE "='", source, "']",
NULL);
} else {
g_string_append_c(xpath, ']');
}
xml = get_xpath_object((const char *) xpath->str, data_set->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
if (xml && target_rc >= 0) {
int rc = PCMK_OCF_UNKNOWN_ERROR;
int status = PCMK_EXEC_ERROR;
crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc);
crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status);
if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) {
return NULL;
}
}
return xml;
}
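/*!
 * \internal
 * \brief Find a resource's history section in a node's CIB status
 *
 * \param[in] rsc_id Resource ID to match
 * \param[in] node_name Node name to match
 * \param[in] data_set Cluster working set
 *
 * \return Matching lrm_resource XML, or NULL if none
 */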
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
pe_working_set_t *data_set)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL);
xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
"//" XML_CIB_TAG_STATE
"[@" XML_ATTR_UNAME "='", node_name, "']"
"//" XML_LRM_TAG_RESOURCE
"[@" XML_ATTR_ID "='", rsc_id, "']",
NULL);
xml = get_xpath_object((const char *) xpath->str, data_set->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
return xml;
}
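/*!
 * \internal
 * \brief Check whether a resource's state is unknown on a particular node
 *
 * \param[in] rsc_id Resource ID to check
 * \param[in] node_name Node to check
 * \param[in] data_set Cluster working set
 *
 * \return true if the resource has no operation history on the node
 */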
static bool
unknown_on_node(const char *rsc_id, const char *node_name,
pe_working_set_t *data_set)
{
xmlNode *lrm_resource = NULL;
lrm_resource = find_lrm_resource(rsc_id, node_name, data_set);
/* If the resource has no lrm_rsc_op history on the node, that means its
* state is unknown there.
*/
return (lrm_resource == NULL
|| first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP) == NULL);
}
/*!
* \brief Check whether a probe/monitor indicating the resource was not running
* on a node happened after some event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
 * \param[in] xml_op Event that monitor is being compared to
 * \param[in] same_node Whether the operations are on the same node
 * \param[in] data_set Cluster working set
 *
 * \return true if such a monitor happened after the event, false otherwise
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
xmlNode *xml_op, bool same_node,
pe_working_set_t *data_set)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
xmlNode *monitor = find_lrm_op(rsc_id, CRMD_ACTION_STATUS, node_name,
NULL, PCMK_OCF_NOT_RUNNING, data_set);
return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
}
/*!
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] xml_op Event that non-monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
* \param[in] data_set Cluster working set
*
 * \return true if such an operation happened after the event, false otherwise
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name, xmlNode *xml_op,
bool same_node, pe_working_set_t *data_set)
{
xmlNode *lrm_resource = NULL;
lrm_resource = find_lrm_resource(rsc_id, node_name, data_set);
if (lrm_resource == NULL) {
return false;
}
for (xmlNode *op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP);
op != NULL; op = crm_next_same_xml(op)) {
const char * task = NULL;
if (op == xml_op) {
continue;
}
task = crm_element_value(op, XML_LRM_ATTR_TASK);
if (pcmk__str_any_of(task, CRMD_ACTION_START, CRMD_ACTION_STOP,
CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)
&& pe__is_newer_op(op, xml_op, same_node) > 0) {
return true;
}
}
return false;
}
/*!
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
* \param[in] rsc_id Resource being checked
* \param[in] node_name Node being checked
* \param[in] migrate_to Any migrate_to event that is being compared to
* \param[in] migrate_from Any migrate_from event that is being compared to
* \param[in] data_set Cluster working set
*
 * \return true if such an operation happened after the events, false otherwise
*/
static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
xmlNode *migrate_to, xmlNode *migrate_from,
pe_working_set_t *data_set)
{
xmlNode *xml_op = migrate_to;
const char *source = NULL;
const char *target = NULL;
bool same_node = false;
if (migrate_from) {
xml_op = migrate_from;
}
source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
    /* It's preferred to compare to the migrate event on the same node if one
     * exists, since call IDs are more reliable.
     */
if (pcmk__str_eq(node_name, target, pcmk__str_casei)) {
if (migrate_from) {
xml_op = migrate_from;
same_node = true;
} else {
xml_op = migrate_to;
}
} else if (pcmk__str_eq(node_name, source, pcmk__str_casei)) {
if (migrate_to) {
xml_op = migrate_to;
same_node = true;
} else {
xml_op = migrate_from;
}
}
/* If there's any newer non-monitor operation on the node, or any newer
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
return non_monitor_after(rsc_id, node_name, xml_op, same_node, data_set)
|| monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
data_set);
}
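/*!
 * \internal
 * \brief Update resource state given a successful migrate_to in its history
 *
 * \param[in,out] rsc Resource that the migrate_to is for
 * \param[in] node Node that the migrate_to is on (the migration source)
 * \param[in] xml_op migrate_to history entry
 * \param[in] data_set Cluster working set
 */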
static void
unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
pe_working_set_t *data_set)
{
/* A successful migration sequence is:
* migrate_to on source node
* migrate_from on target node
* stop on source node
*
     * But there could be scenarios like the following (easier to reproduce
     * with the cluster property batch-limit=1):
*
* - rscA is live-migrating from node1 to node2.
*
* - Before migrate_to on node1 returns, put node2 into standby.
*
* - Transition aborts upon return of successful migrate_to on node1. New
* transition is going to stop the rscA on both nodes and start it on
* node1.
*
* - While it is stopping on node1, run something that is going to make
* the transition abort again like:
* crm_resource --resource rscA --ban --node node2
*
* - Transition aborts upon return of stop on node1.
*
* Now although there's a stop on node1, it's still a partial migration and
* rscA is still potentially active on node2.
*
* So even if a migrate_to is followed by a stop, we still need to check
* whether there's a corresponding migrate_from or any newer operation on
* the target.
*
* If no migrate_from has happened, the migration is considered to be
* "partial". If the migrate_from failed, make sure the resource gets
* stopped on both source and target (if up).
*
* If the migrate_to and migrate_from both succeeded (which also implies the
* resource is no longer running on the source), but there is no stop, the
* migration is considered to be "dangling". Schedule a stop on the source
* in this case.
*/
int from_rc = 0;
int from_status = 0;
pe_node_t *target_node = NULL;
pe_node_t *source_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
bool source_newer_op = false;
bool target_newer_state = false;
// Sanity check
CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
/* If there's any newer non-monitor operation on the source, this migrate_to
* potentially no longer matters for the source.
*/
source_newer_op = non_monitor_after(rsc->id, source, xml_op, true,
data_set);
// Check whether there was a migrate_from action on the target
migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
source, -1, data_set);
/* Even if there's a newer non-monitor operation on the source, we still
* need to check how this migrate_to might matter for the target.
*/
if (source_newer_op && migrate_from) {
return;
}
/* If the resource has newer state on the target after the migration
* events, this migrate_to no longer matters for the target.
*/
target_newer_state = newer_state_after_migrate(rsc->id, target, xml_op,
migrate_from, data_set);
if (source_newer_op && target_newer_state) {
return;
}
// Clones are not allowed to migrate, so role can't be promoted
rsc->role = RSC_ROLE_STARTED;
target_node = pe_find_node(data_set->nodes, target);
source_node = pe_find_node(data_set->nodes, source);
if (migrate_from) {
crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status);
pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d",
ID(migrate_from), target, from_status, from_rc);
}
if (migrate_from && from_rc == PCMK_OCF_OK
&& (from_status == PCMK_EXEC_DONE)) {
/* The migrate_to and migrate_from both succeeded, so mark the migration
* as "dangling". This will be used to schedule a stop action on the
* source without affecting the target.
*/
pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op),
source);
rsc->role = RSC_ROLE_STOPPED;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
} else if (migrate_from && (from_status != PCMK_EXEC_PENDING)) { // Failed
/* If the resource has newer state on the target, this migrate_to no
* longer matters for the target.
*/
if (!target_newer_state
&& target_node && target_node->details->online) {
pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
target_node->details->online);
native_add_running(rsc, target_node, data_set, TRUE);
} else {
/* With the earlier bail logic, migrate_from != NULL here implies
* source_newer_op is false, meaning this migrate_to still matters
* for the source.
* Consider it failed here - forces a restart, prevents migration
*/
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
}
} else { // Pending, or complete but erased
/* If the resource has newer state on the target, this migrate_to no
* longer matters for the target.
*/
if (!target_newer_state
&& target_node && target_node->details->online) {
pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
target_node->details->online);
native_add_running(rsc, target_node, data_set, FALSE);
if (source_node && source_node->details->online) {
/* This is a partial migration: the migrate_to completed
* successfully on the source, but the migrate_from has not
* completed. Remember the source and target; if the newly
* chosen target remains the same when we schedule actions
* later, we may continue with the migration.
*/
rsc->partial_migration_target = target_node;
rsc->partial_migration_source = source_node;
}
} else if (!source_newer_op) {
/* This migrate_to matters for the source only if it's the last
* non-monitor operation here.
* Consider it failed here - forces a restart, prevents migration
*/
pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
}
}
}
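/*!
 * \internal
 * \brief Update resource state given a failed migrate_to in its history
 *
 * \param[in,out] rsc Resource that the migrate_to is for
 * \param[in] node Node that the migrate_to is on (the migration source)
 * \param[in] xml_op migrate_to history entry
 * \param[in] data_set Cluster working set
 */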
static void
unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
pe_working_set_t *data_set)
{
xmlNode *target_migrate_from = NULL;
const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
// Sanity check
CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
rsc->role = RSC_ROLE_STARTED;
// Check for migrate_from on the target
target_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
source, PCMK_OCF_OK, data_set);
if (/* If the resource state is unknown on the target, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(rsc->id, target, data_set)
/* If the resource has newer state on the target after the migration
* events, this migrate_to no longer matters for the target.
*/
&& !newer_state_after_migrate(rsc->id, target, xml_op, target_migrate_from,
data_set)) {
        /* The resource has no newer state on the target, so assume it's still
         * active there (if it is up).
         */
pe_node_t *target_node = pe_find_node(data_set->nodes, target);
if (target_node && target_node->details->online) {
native_add_running(rsc, target_node, data_set, FALSE);
}
} else if (!non_monitor_after(rsc->id, source, xml_op, true, data_set)) {
/* We know the resource has newer state on the target, but this
* migrate_to still matters for the source as long as there's no newer
* non-monitor operation there.
*/
// Mark node as having dangling migration so we can force a stop later
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
}
}
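/*!
 * \internal
 * \brief Update resource state given a failed migrate_from in its history
 *
 * \param[in,out] rsc Resource that the migrate_from is for
 * \param[in] node Node that the migrate_from is on (the migration target)
 * \param[in] xml_op migrate_from history entry
 * \param[in] data_set Cluster working set
 */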
static void
unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node,
xmlNode *xml_op, pe_working_set_t *data_set)
{
xmlNode *source_migrate_to = NULL;
const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
// Sanity check
CRM_CHECK(source && target && !strcmp(target, node->details->uname), return);
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
rsc->role = RSC_ROLE_STARTED;
// Check for a migrate_to on the source
source_migrate_to = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE,
source, target, PCMK_OCF_OK, data_set);
if (/* If the resource state is unknown on the source, it will likely be
* probed there.
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
!unknown_on_node(rsc->id, source, data_set)
/* If the resource has newer state on the source after the migration
* events, this migrate_from no longer matters for the source.
*/
&& !newer_state_after_migrate(rsc->id, source, source_migrate_to, xml_op,
data_set)) {
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
pe_node_t *source_node = pe_find_node(data_set->nodes, source);
if (source_node && source_node->details->online) {
native_add_running(rsc, source_node, data_set, TRUE);
}
}
}
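/*!
 * \internal
 * \brief Add an operation to the working set's list of failed operations
 *
 * The entry is skipped if the node is offline or if the same operation on the
 * same node has already been recorded.
 *
 * \param[in] op Operation history XML
 * \param[in] node Node that the operation was on
 * \param[in] rsc Resource that the operation was for
 * \param[in] data_set Cluster working set
 */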
static void
record_failed_op(xmlNode *op, const pe_node_t *node,
const pe_resource_t *rsc, pe_working_set_t *data_set)
{
xmlNode *xIter = NULL;
const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY);
if (node->details->online == FALSE) {
return;
}
for (xIter = data_set->failed->children; xIter; xIter = xIter->next) {
const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY);
const char *uname = crm_element_value(xIter, XML_ATTR_UNAME);
if(pcmk__str_eq(op_key, key, pcmk__str_casei) && pcmk__str_eq(uname, node->details->uname, pcmk__str_casei)) {
crm_trace("Skipping duplicate entry %s on %s",
op_key, pe__node_name(node));
return;
}
}
crm_trace("Adding entry %s on %s", op_key, pe__node_name(node));
crm_xml_add(op, XML_ATTR_UNAME, node->details->uname);
crm_xml_add(op, XML_LRM_ATTR_RSCID, rsc->id);
add_node_copy(data_set->failed, op);
}
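// Get an operation's key from its history XML, or its ID if the key is unset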
static const char *get_op_key(xmlNode *xml_op)
{
const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
if(key == NULL) {
key = ID(xml_op);
}
return key;
}
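/*!
 * \internal
 * \brief Get a string with an operation's last-change time, for messages
 *
 * \param[in] xml_op Operation history XML
 *
 * \return Newly allocated string with the time (minus the day of week), or
 *         "unknown time" if not available (asserts on allocation failure)
 */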
static char *
last_change_str(const xmlNode *xml_op)
{
time_t when;
char *result = NULL;
if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&when) == pcmk_ok) {
char *when_s = pcmk__epoch2str(&when, 0);
const char *p = strchr(when_s, ' ');
// Skip day of week to make message shorter
if ((p != NULL) && (*(++p) != '\0')) {
result = strdup(p);
CRM_ASSERT(result != NULL);
}
free(when_s);
}
if (result == NULL) {
result = strdup("unknown time");
CRM_ASSERT(result != NULL);
}
return result;
}
/*!
* \internal
* \brief Compare two on-fail values
*
* \param[in] first One on-fail value to compare
* \param[in] second The other on-fail value to compare
*
* \return A negative number if second is more severe than first, zero if they
* are equal, or a positive number if first is more severe than second.
* \note This is only needed until the action_fail_response values can be
* renumbered at the next API compatibility break.
*/
static int
cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
{
switch (first) {
case action_fail_demote:
switch (second) {
case action_fail_ignore:
return 1;
case action_fail_demote:
return 0;
default:
return -1;
}
break;
case action_fail_reset_remote:
switch (second) {
case action_fail_ignore:
case action_fail_demote:
case action_fail_recover:
return 1;
case action_fail_reset_remote:
return 0;
default:
return -1;
}
break;
case action_fail_restart_container:
switch (second) {
case action_fail_ignore:
case action_fail_demote:
case action_fail_recover:
case action_fail_reset_remote:
return 1;
case action_fail_restart_container:
return 0;
default:
return -1;
}
break;
default:
break;
}
switch (second) {
case action_fail_demote:
return (first == action_fail_ignore)? -1 : 1;
case action_fail_reset_remote:
switch (first) {
case action_fail_ignore:
case action_fail_demote:
case action_fail_recover:
return -1;
default:
return 1;
}
break;
case action_fail_restart_container:
switch (first) {
case action_fail_ignore:
case action_fail_demote:
case action_fail_recover:
case action_fail_reset_remote:
return -1;
default:
return 1;
}
break;
default:
break;
}
return first - second;
}
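/*!
 * \internal
 * \brief Update resource and cluster state to reflect a failed action
 *
 * \param[in,out] rsc Resource that the action is for
 * \param[in] node Node that the action was on
 * \param[in] rc Actual result of the action
 * \param[in] xml_op Action history XML
 * \param[out] last_failure Where to store the history entry
 * \param[in,out] on_fail Failure handling so far; updated if this action's
 *                        on-fail setting is more severe
 * \param[in,out] data_set Cluster working set
 */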
static void
unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure,
enum action_fail_response * on_fail, pe_working_set_t * data_set)
{
bool is_probe = false;
pe_action_t *action = NULL;
const char *key = get_op_key(xml_op);
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *exit_reason = crm_element_value(xml_op,
XML_LRM_ATTR_EXIT_REASON);
char *last_change_s = NULL;
CRM_ASSERT(rsc);
CRM_CHECK(task != NULL, return);
*last_failure = xml_op;
is_probe = pcmk_xe_is_probe(xml_op);
last_change_s = last_change_str(xml_op);
if (exit_reason == NULL) {
exit_reason = "";
}
if (!pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)
&& (rc == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " rc=%d id=%s",
services_ocf_exitcode_str(rc),
(*exit_reason? ": " : ""), exit_reason,
(is_probe? "probe" : task), rsc->id, pe__node_name(node),
last_change_s, rc, ID(xml_op));
} else {
crm_warn("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " rc=%d id=%s",
services_ocf_exitcode_str(rc),
(*exit_reason? ": " : ""), exit_reason,
(is_probe? "probe" : task), rsc->id, pe__node_name(node),
last_change_s, rc, ID(xml_op));
if (is_probe && (rc != PCMK_OCF_OK)
&& (rc != PCMK_OCF_NOT_RUNNING)
&& (rc != PCMK_OCF_RUNNING_PROMOTED)) {
/* A failed (not just unexpected) probe result could mean the user
* didn't know resources will be probed even where they can't run.
*/
crm_notice("If it is not possible for %s to run on %s, see "
"the resource-discovery option for location constraints",
rsc->id, pe__node_name(node));
}
record_failed_op(xml_op, node, rsc, data_set);
}
free(last_change_s);
action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
if (cmp_on_fail(*on_fail, action->on_fail) < 0) {
pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail),
fail2text(action->on_fail), action->uuid, key);
*on_fail = action->on_fail;
}
if (!strcmp(task, CRMD_ACTION_STOP)) {
resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set);
} else if (!strcmp(task, CRMD_ACTION_MIGRATE)) {
unpack_migrate_to_failure(rsc, node, xml_op, data_set);
} else if (!strcmp(task, CRMD_ACTION_MIGRATED)) {
unpack_migrate_from_failure(rsc, node, xml_op, data_set);
} else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
rsc->role = RSC_ROLE_PROMOTED;
} else if (!strcmp(task, CRMD_ACTION_DEMOTE)) {
if (action->on_fail == action_fail_block) {
rsc->role = RSC_ROLE_PROMOTED;
pe__set_next_role(rsc, RSC_ROLE_STOPPED,
"demote with on-fail=block");
} else if(rc == PCMK_OCF_NOT_RUNNING) {
rsc->role = RSC_ROLE_STOPPED;
} else {
/* Staying in the promoted role would put the scheduler and
* controller into a loop. Setting the role to unpromoted is not
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
rsc->role = RSC_ROLE_UNPROMOTED;
}
}
if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) {
/* leave stopped */
pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id);
rsc->role = RSC_ROLE_STOPPED;
} else if (rsc->role < RSC_ROLE_STARTED) {
pe_rsc_trace(rsc, "Setting %s active", rsc->id);
set_active(rsc);
}
pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
rsc->id, role2text(rsc->role),
pcmk__btoa(node->details->unclean),
fail2text(action->on_fail), role2text(action->fail_role));
if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) {
pe__set_next_role(rsc, action->fail_role, "failure");
}
if (action->fail_role == RSC_ROLE_STOPPED) {
int score = -INFINITY;
pe_resource_t *fail_rsc = rsc;
if (fail_rsc->parent) {
pe_resource_t *parent = uber_parent(fail_rsc);
if (pe_rsc_is_clone(parent)
&& !pcmk_is_set(parent->flags, pe_rsc_unique)) {
/* For clone resources, if a child fails on an operation
* with on-fail = stop, all the resources fail. Do this by preventing
* the parent from coming up again. */
fail_rsc = parent;
}
}
crm_notice("%s will not be started under current conditions",
fail_rsc->id);
/* make sure it doesn't come up again */
if (fail_rsc->allowed_nodes != NULL) {
g_hash_table_destroy(fail_rsc->allowed_nodes);
}
fail_rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
}
pe_free_action(action);
}
/*!
* \internal
* \brief Check whether a resource with a failed action can be recovered
*
* If resource action is a failed stop and fencing is not possible, mark the
* resource as unmanaged and blocked, since recovery cannot be done.
*
* \param[in,out] rsc Resource with failed action
* \param[in] node Node where action failed
* \param[in] task Name of action that failed
* \param[in] exit_status Exit status of failed action (for logging only)
* \param[in] xml_op XML of failed action result (for logging only)
*/
static void
check_recoverable(pe_resource_t *rsc, pe_node_t *node, const char *task,
int exit_status, const xmlNode *xml_op)
{
const char *exit_reason = NULL;
char *last_change_s = NULL;
if (strcmp(task, CRMD_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(node->details->data_set, node)) {
return; // Failed stops are recoverable via fencing
}
exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
last_change_s = last_change_str(xml_op);
pe_proc_err("No further recovery can be attempted for %s "
"because %s on %s failed (%s%s%s) at %s "
CRM_XS " rc=%d id=%s", rsc->id, task, pe__node_name(node),
services_ocf_exitcode_str(exit_status),
((exit_reason == NULL)? "" : ": "), pcmk__s(exit_reason, ""),
last_change_s, exit_status, ID(xml_op));
free(last_change_s);
pe__clear_resource_flags(rsc, pe_rsc_managed);
pe__set_resource_flags(rsc, pe_rsc_block);
}
/*!
* \internal
* \brief Remap informational monitor results and operation status
*
 * For monitor results, certain OCF codes exist to provide extended information
 * to the user about services that are not failed but not entirely healthy
 * either. These must be treated as the "normal" result by Pacemaker.
*
* For operation status, the action result can be used to determine an appropriate
* status for the purposes of responding to the action. The status provided by the
* executor is not directly usable since the executor does not know what was expected.
*
* \param[in] xml_op Operation history entry XML from CIB status
* \param[in,out] rsc Resource that operation history entry is for
* \param[in] node Node where operation was executed
* \param[in] data_set Current cluster working set
* \param[in,out] on_fail What should be done about the result
* \param[in] target_rc Expected return code of operation
* \param[in,out] rc Actual return code of operation
* \param[in,out] status Operation execution status
*
* \note If the result is remapped and the node is not shutting down or failed,
* the operation will be recorded in the data set's list of failed operations
* to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
static void
remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set, enum action_fail_response *on_fail,
int target_rc, int *rc, int *status) {
bool is_probe = false;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *key = get_op_key(xml_op);
const char *exit_reason = crm_element_value(xml_op,
XML_LRM_ATTR_EXIT_REASON);
char *last_change_s = NULL;
if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_none)) {
int remapped_rc = pcmk__effective_rc(*rc);
if (*rc != remapped_rc) {
crm_trace("Remapping monitor result %d to %d", *rc, remapped_rc);
if (!node->details->shutdown || node->details->online) {
record_failed_op(xml_op, node, rsc, data_set);
}
*rc = remapped_rc;
}
}
if (!pe_rsc_is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
*status = PCMK_EXEC_DONE;
*rc = PCMK_OCF_NOT_RUNNING;
}
/* If the executor reported an operation status of anything but done or
* error, consider that final. But for done or error, we know better whether
* it should be treated as a failure or not, because we know the expected
* result.
*/
if (*status != PCMK_EXEC_DONE && *status != PCMK_EXEC_ERROR) {
return;
}
CRM_ASSERT(rsc);
CRM_CHECK(task != NULL,
*status = PCMK_EXEC_ERROR; return);
*status = PCMK_EXEC_DONE;
if (exit_reason == NULL) {
exit_reason = "";
}
is_probe = pcmk_xe_is_probe(xml_op);
if (is_probe) {
task = "probe";
}
if (target_rc < 0) {
/* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
* Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
* target_rc in the transition key, which (along with the similar case
* of a corrupted transition key in the CIB) will be reported to this
* function as -1. Pacemaker 2.0+ does not support rolling upgrades from
* those versions or processing of saved CIB files from those versions,
* so we do not need to care much about this case.
*/
*status = PCMK_EXEC_ERROR;
crm_warn("Expected result not found for %s on %s (corrupt or obsolete CIB?)",
key, pe__node_name(node));
} else if (target_rc != *rc) {
*status = PCMK_EXEC_ERROR;
pe_rsc_debug(rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)",
key, pe__node_name(node),
target_rc, services_ocf_exitcode_str(target_rc),
*rc, services_ocf_exitcode_str(*rc),
(*exit_reason? ": " : ""), exit_reason);
}
last_change_s = last_change_str(xml_op);
switch (*rc) {
case PCMK_OCF_OK:
if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) {
*status = PCMK_EXEC_DONE;
pe_rsc_info(rsc, "Probe found %s active on %s at %s",
rsc->id, pe__node_name(node), last_change_s);
}
break;
case PCMK_OCF_NOT_RUNNING:
if (is_probe || (target_rc == *rc)
|| !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
*status = PCMK_EXEC_DONE;
rsc->role = RSC_ROLE_STOPPED;
/* clear any previous failure actions */
*on_fail = action_fail_ignore;
pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "not running");
}
break;
case PCMK_OCF_RUNNING_PROMOTED:
if (is_probe && (*rc != target_rc)) {
*status = PCMK_EXEC_DONE;
pe_rsc_info(rsc,
"Probe found %s active and promoted on %s at %s",
rsc->id, pe__node_name(node), last_change_s);
}
rsc->role = RSC_ROLE_PROMOTED;
break;
case PCMK_OCF_DEGRADED_PROMOTED:
case PCMK_OCF_FAILED_PROMOTED:
rsc->role = RSC_ROLE_PROMOTED;
*status = PCMK_EXEC_ERROR;
break;
case PCMK_OCF_NOT_CONFIGURED:
*status = PCMK_EXEC_ERROR_FATAL;
break;
case PCMK_OCF_UNIMPLEMENT_FEATURE:
{
guint interval_ms = 0;
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS,
&interval_ms);
if (interval_ms == 0) {
check_recoverable(rsc, node, task, *rc, xml_op);
*status = PCMK_EXEC_ERROR_HARD;
} else {
*status = PCMK_EXEC_NOT_SUPPORTED;
}
}
break;
case PCMK_OCF_NOT_INSTALLED:
case PCMK_OCF_INVALID_PARAM:
case PCMK_OCF_INSUFFICIENT_PRIV:
check_recoverable(rsc, node, task, *rc, xml_op);
*status = PCMK_EXEC_ERROR_HARD;
break;
default:
if (*status == PCMK_EXEC_DONE) {
crm_info("Treating unknown exit status %d from %s of %s "
"on %s at %s as failure",
*rc, task, rsc->id, pe__node_name(node),
last_change_s);
*status = PCMK_EXEC_ERROR;
}
break;
}
free(last_change_s);
pe_rsc_trace(rsc, "Remapped %s status to '%s'",
key, pcmk_exec_status_str(*status));
}
// Return TRUE if a start or monitor last failure should be cleared due to changed parameters
static bool
should_clear_for_param_change(xmlNode *xml_op, const char *task,
pe_resource_t *rsc, pe_node_t *node,
pe_working_set_t *data_set)
{
if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
if (pe__bundle_needs_remote_name(rsc, data_set)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
pe__add_param_check(xml_op, rsc, node, pe_check_last_failure,
data_set);
} else {
op_digest_cache_t *digest_data = NULL;
digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set);
switch (digest_data->rc) {
case RSC_DIGEST_UNKNOWN:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, get_op_key(xml_op), node->details->id);
break;
case RSC_DIGEST_MATCH:
break;
default:
return TRUE;
}
}
}
return FALSE;
}
// Order action after fencing of remote node, given connection rsc
static void
order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn,
pe_working_set_t *data_set)
{
pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id);
if (remote_node) {
pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
FALSE, data_set);
order_actions(fence, action, pe_order_implies_then);
}
}
static bool
should_ignore_failure_timeout(pe_resource_t *rsc, xmlNode *xml_op,
const char *task, guint interval_ms,
bool is_last_failure, pe_working_set_t *data_set)
{
/* Clearing failures of recurring monitors has special concerns. The
* executor reports only changes in the monitor result, so if the
* monitor is still active and still getting the same failure result,
* that will go undetected after the failure is cleared.
*
* Also, the operation history will have the time when the recurring
* monitor result changed to the given code, not the time when the
* result last happened.
*
* @TODO We probably should clear such failures only when the failure
* timeout has passed since the last occurrence of the failed result.
* However we don't record that information. We could maybe approximate
* that by clearing only if there is a more recent successful monitor or
* stop result, but we don't even have that information at this point
* since we are still unpacking the resource's operation history.
*
* This is especially important for remote connection resources with a
* reconnect interval, so in that case, we skip clearing failures
* if the remote node hasn't been fenced.
*/
if (rsc->remote_reconnect_ms
&& pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
&& (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
if (remote_node && !remote_node->details->remote_was_fenced) {
if (is_last_failure) {
crm_info("Waiting to clear monitor failure for remote node %s"
" until fencing has occurred", rsc->id);
}
return TRUE;
}
}
return FALSE;
}
/*!
* \internal
* \brief Check operation age and schedule failure clearing when appropriate
*
* This function has two distinct purposes. The first is to check whether an
* operation history entry is expired (i.e. the resource has a failure timeout,
* the entry is older than the timeout, and the resource either has no fail
* count or its fail count is entirely older than the timeout). The second is to
* schedule fail count clearing when appropriate (i.e. the operation is expired
* and either the resource has an expired fail count or the operation is a
* last_failure for a remote connection resource with a reconnect interval,
* or the operation is a last_failure for a start or monitor operation and the
* resource's parameters have changed since the operation).
*
* \param[in] rsc Resource that operation happened to
* \param[in] node Node that operation happened on
* \param[in] rc Actual result of operation
* \param[in] xml_op Operation history entry XML
* \param[in] data_set Current working set
*
* \return TRUE if operation history entry is expired, FALSE otherwise
*/
static bool
check_operation_expiry(pe_resource_t *rsc, pe_node_t *node, int rc,
xmlNode *xml_op, pe_working_set_t *data_set)
{
bool expired = FALSE;
bool is_last_failure = pcmk__ends_with(ID(xml_op), "_last_failure_0");
time_t last_run = 0;
guint interval_ms = 0;
int unexpired_fail_count = 0;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *clear_reason = NULL;
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if ((rsc->failure_timeout > 0)
&& (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
&last_run) == 0)) {
// Resource has a failure-timeout, and history entry has a timestamp
time_t now = get_effective_time(data_set);
time_t last_failure = 0;
// Is this particular operation history older than the failure timeout?
if ((now >= (last_run + rsc->failure_timeout))
&& !should_ignore_failure_timeout(rsc, xml_op, task, interval_ms,
is_last_failure, data_set)) {
expired = TRUE;
}
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(node, rsc, &last_failure,
pe_fc_effective, xml_op,
data_set);
// Update scheduler recheck time according to *last* failure
crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
" last-failure@%lld",
ID(xml_op), (long long) last_run, (expired? "" : "not "),
(long long) now, unexpired_fail_count, rsc->failure_timeout,
(long long) last_failure);
last_failure += rsc->failure_timeout + 1;
if (unexpired_fail_count && (now < last_failure)) {
pe__update_recheck_time(last_failure, data_set);
}
}
if (expired) {
if (pe_get_failcount(node, rsc, NULL, pe_fc_default, xml_op, data_set)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
// There is no fail count considering timeout
clear_reason = "it expired";
} else {
/* This operation is old, but there is an unexpired fail count.
* In a properly functioning cluster, this should only be
* possible if this operation is not a failure (otherwise the
* fail count should be expired too), so this is really just a
* failsafe.
*/
expired = FALSE;
}
} else if (is_last_failure && rsc->remote_reconnect_ms) {
/* Clear any expired last failure when reconnect interval is set,
* even if there is no fail count.
*/
clear_reason = "reconnect interval is set";
}
}
if (!expired && is_last_failure
&& should_clear_for_param_change(xml_op, task, rsc, node, data_set)) {
clear_reason = "resource parameters have changed";
}
if (clear_reason != NULL) {
// Schedule clearing of the fail count
pe_action_t *clear_op = pe__clear_failcount(rsc, node, clear_reason,
data_set);
if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
&& rsc->remote_reconnect_ms) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
* completes.
*
* We could limit this to remote_node->details->unclean, but at
* this point, that's always true (it won't be reliable until
* after unpack_node_history() is done).
*/
crm_info("Clearing %s failure will wait until any scheduled "
"fencing of %s completes", task, rsc->id);
order_after_remote_fencing(clear_op, rsc, data_set);
}
}
    if (expired && (interval_ms == 0)
        && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
switch(rc) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
case PCMK_OCF_RUNNING_PROMOTED:
case PCMK_OCF_DEGRADED:
case PCMK_OCF_DEGRADED_PROMOTED:
// Don't expire probes that return these values
expired = FALSE;
break;
}
}
return expired;
}
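/*!
 * \internal
 * \brief Get the expected result of an operation from its history entry
 *
 * \param[in] xml_op Operation history entry XML
 *
 * \return Expected result from entry's transition key, or -1 if not found
 */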
int
pe__target_rc_from_xml(xmlNode *xml_op)
{
int target_rc = 0;
const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);
if (key == NULL) {
return -1;
}
decode_transition_key(key, NULL, NULL, NULL, &target_rc);
return target_rc;
}
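/*!
 * \internal
 * \brief Get the failure handling configured for an action
 *
 * \param[in]     rsc      Resource that action is for
 * \param[in]     key      Action key
 * \param[in]     task     Action name
 * \param[in,out] data_set Current working set
 *
 * \return Failure handling appropriate to action
 */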
static enum action_fail_response
get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task,
                   pe_working_set_t *data_set)
{
enum action_fail_response result = action_fail_recover;
pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
result = action->on_fail;
pe_free_action(action);
return result;
}
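/*!
 * \internal
 * \brief Update a resource's role and failure handling for an action result
 *
 * \param[in,out] rsc          Resource that operation happened to
 * \param[in]     node         Node that operation happened on
 * \param[in]     xml_op       Operation history entry XML
 * \param[in]     task         Name of action that entry is for
 * \param[in]     rc           Action result
 * \param[in]     last_failure Resource's last_failure entry, if known
 * \param[in,out] on_fail      Resource's current failure handling
 * \param[in,out] data_set     Current working set
 */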
static void
update_resource_state(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                      const char *task, int rc, xmlNode *last_failure,
                      enum action_fail_response *on_fail,
                      pe_working_set_t *data_set)
{
gboolean clear_past_failure = FALSE;
CRM_ASSERT(rsc);
CRM_ASSERT(xml_op);
    if ((rc == PCMK_OCF_NOT_INSTALLED)
        || (!pe_rsc_is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op))) {
rsc->role = RSC_ROLE_STOPPED;
} else if (rc == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = TRUE;
} else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
if (last_failure) {
const char *op_key = get_op_key(xml_op);
const char *last_failure_key = get_op_key(last_failure);
if (pcmk__str_eq(op_key, last_failure_key, pcmk__str_casei)) {
clear_past_failure = TRUE;
}
}
if (rsc->role < RSC_ROLE_STARTED) {
set_active(rsc);
}
} else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
rsc->role = RSC_ROLE_STARTED;
clear_past_failure = TRUE;
} else if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
rsc->role = RSC_ROLE_STOPPED;
clear_past_failure = TRUE;
} else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
rsc->role = RSC_ROLE_PROMOTED;
clear_past_failure = TRUE;
} else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
if (*on_fail == action_fail_demote) {
// Demote clears an error only if on-fail=demote
clear_past_failure = TRUE;
}
rsc->role = RSC_ROLE_UNPROMOTED;
} else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
rsc->role = RSC_ROLE_STARTED;
clear_past_failure = TRUE;
} else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
unpack_migrate_to_success(rsc, node, xml_op, data_set);
} else if (rsc->role < RSC_ROLE_STARTED) {
pe_rsc_trace(rsc, "%s active on %s", rsc->id, pe__node_name(node));
set_active(rsc);
}
    /* Clear any failure handling left over from a previous failure */
if (clear_past_failure) {
switch (*on_fail) {
case action_fail_stop:
case action_fail_fence:
case action_fail_migrate:
case action_fail_standby:
pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop",
rsc->id, fail2text(*on_fail));
break;
case action_fail_block:
case action_fail_ignore:
case action_fail_demote:
case action_fail_recover:
case action_fail_restart_container:
*on_fail = action_fail_ignore;
pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "clear past failures");
break;
case action_fail_reset_remote:
if (rsc->remote_reconnect_ms == 0) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
* completely stopped. (With a reconnect interval, we wait
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
*on_fail = action_fail_ignore;
pe__set_next_role(rsc, RSC_ROLE_UNKNOWN,
"clear past failures and reset remote");
}
break;
}
}
}
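/*!
 * \internal
 * \brief Unpack a resource operation history entry, updating resource state
 *
 * \param[in,out] rsc          Resource that operation happened to
 * \param[in]     node         Node that operation happened on
 * \param[in]     xml_op       Operation history entry XML
 * \param[in,out] last_failure Where resource's last failure entry is stored
 * \param[in,out] on_fail      Where resource's failure handling is stored
 * \param[in,out] data_set     Current working set
 */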
static void
unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum action_fail_response *on_fail,
pe_working_set_t *data_set)
{
int rc = 0;
int old_rc = 0;
int task_id = 0;
int target_rc = 0;
int old_target_rc = 0;
int status = PCMK_EXEC_UNKNOWN;
guint interval_ms = 0;
const char *task = NULL;
const char *task_key = NULL;
const char *exit_reason = NULL;
bool expired = false;
pe_resource_t *parent = rsc;
enum action_fail_response failure_strategy = action_fail_recover;
bool maskable_probe_failure = false;
char *last_change_s = NULL;
CRM_CHECK(rsc && node && xml_op, return);
target_rc = pe__target_rc_from_xml(xml_op);
task_key = get_op_key(xml_op);
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
if (exit_reason == NULL) {
exit_reason = "";
}
crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc);
crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id);
crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status);
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
CRM_CHECK(task != NULL, return);
CRM_CHECK((status >= PCMK_EXEC_PENDING) && (status <= PCMK_EXEC_MAX),
return);
if (!strcmp(task, CRMD_ACTION_NOTIFY) ||
!strcmp(task, CRMD_ACTION_METADATA)) {
/* safe to ignore these */
return;
}
if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
parent = uber_parent(rsc);
}
pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)",
task_key, task, task_id, status, rc, pe__node_name(node),
role2text(rsc->role));
if (node->details->unclean) {
pe_rsc_trace(rsc,
"%s is running on %s, which is unclean (further action "
"depends on value of stop's on-fail attribute)",
rsc->id, pe__node_name(node));
}
/* It should be possible to call remap_operation() first then call
* check_operation_expiry() only if rc != target_rc, because there should
* never be a fail count without at least one unexpected result in the
     * resource history. That would be more efficient, because it would avoid
     * calling check_operation_expiry() for expected results.
     *
     * However, the scheduler regression tests do contain such configurations
     * (a fail count without an unexpected result), even though that shouldn't
     * be possible with the current code. The reordering is probably a good
     * idea anyway, but it would require updating the test inputs to something
     * currently possible.
*/
if ((status != PCMK_EXEC_NOT_INSTALLED)
&& check_operation_expiry(rsc, node, rc, xml_op, data_set)) {
expired = true;
}
old_rc = rc;
old_target_rc = target_rc;
remap_operation(xml_op, rsc, node, data_set, on_fail, target_rc,
&rc, &status);
maskable_probe_failure = !pe_rsc_is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op);
last_change_s = last_change_str(xml_op);
if (expired && maskable_probe_failure && old_rc != old_target_rc) {
if (rsc->role <= RSC_ROLE_STOPPED) {
rsc->role = RSC_ROLE_UNKNOWN;
}
goto done;
} else if (expired && (rc != target_rc)) {
const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC);
if (interval_ms == 0) {
crm_notice("Ignoring expired %s failure on %s "
CRM_XS " actual=%d expected=%d magic=%s",
task_key, pe__node_name(node), rc, target_rc, magic);
goto done;
        } else if (node->details->online && !node->details->unclean) {
/* Reschedule the recurring monitor. schedule_cancel() won't work at
* this stage, so as a hacky workaround, forcibly change the restart
* digest so pcmk__check_action_config() does what we want later.
*
* @TODO We should skip this if there is a newer successful monitor.
* Also, this causes rescheduling only if the history entry
* has an op-digest (which the expire-non-blocked-failure
* scheduler regression test doesn't, but that may not be a
* realistic scenario in production).
*/
crm_notice("Rescheduling %s after failure expired on %s "
CRM_XS " actual=%d expected=%d magic=%s",
task_key, pe__node_name(node), rc, target_rc, magic);
crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST, "calculated-failure-timeout");
goto done;
}
}
if (maskable_probe_failure) {
crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
services_ocf_exitcode_str(old_rc), rsc->id,
pe__node_name(node));
update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure,
on_fail, data_set);
crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
record_failed_op(xml_op, node, rsc, data_set);
resource_location(parent, node, -INFINITY, "masked-probe-failure", data_set);
goto done;
}
switch (status) {
case PCMK_EXEC_CANCELLED:
// Should never happen
pe_err("Resource history contains cancellation '%s' "
"(%s of %s on %s at %s)",
ID(xml_op), task, rsc->id, pe__node_name(node),
last_change_s);
goto done;
case PCMK_EXEC_PENDING:
if (!strcmp(task, CRMD_ACTION_START)) {
pe__set_resource_flags(rsc, pe_rsc_start_pending);
set_active(rsc);
} else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
rsc->role = RSC_ROLE_PROMOTED;
} else if (!strcmp(task, CRMD_ACTION_MIGRATE) && node->details->unclean) {
                /* If a pending migrate_to action is out on an unclean node,
                 * we have to force the stop action on the target. */
const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
pe_node_t *target = pe_find_node(data_set->nodes, migrate_target);
if (target) {
stop_action(rsc, target, FALSE);
}
}
if (rsc->pending_task == NULL) {
if ((interval_ms != 0) || strcmp(task, CRMD_ACTION_STATUS)) {
rsc->pending_task = strdup(task);
rsc->pending_node = node;
} else {
/* Pending probes are not printed, even if pending
* operations are requested. If someone ever requests that
* behavior, enable the below and the corresponding part of
* native.c:native_pending_task().
*/
#if 0
rsc->pending_task = strdup("probe");
rsc->pending_node = node;
#endif
}
}
goto done;
case PCMK_EXEC_DONE:
pe_rsc_trace(rsc, "%s of %s on %s completed at %s " CRM_XS " id=%s",
task, rsc->id, pe__node_name(node),
last_change_s, ID(xml_op));
update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set);
goto done;
case PCMK_EXEC_NOT_INSTALLED:
failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
if (failure_strategy == action_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
CRM_XS " status=%d rc=%d id=%s",
task, rsc->id, pe__node_name(node), status, rc,
ID(xml_op));
            /* Also mark the resource as failed later (via pe_rsc_failed) so
             * it is displayed as "FAILED" */
*on_fail = action_fail_migrate;
}
resource_location(parent, node, -INFINITY, "hard-error", data_set);
unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pe__is_guest_or_remote_node(node)
&& pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
* fail-safe in case a bug or unusual circumstances do lead to
* that, ensure the remote connection is considered failed.
*/
pe__set_resource_flags(node->details->remote_rsc,
pe_rsc_failed|pe_rsc_stop);
}
break; // Not done, do error handling
case PCMK_EXEC_ERROR:
case PCMK_EXEC_ERROR_HARD:
case PCMK_EXEC_ERROR_FATAL:
case PCMK_EXEC_TIMEOUT:
case PCMK_EXEC_NOT_SUPPORTED:
case PCMK_EXEC_INVALID:
break; // Not done, do error handling
case PCMK_EXEC_NO_FENCE_DEVICE:
case PCMK_EXEC_NO_SECRETS:
status = PCMK_EXEC_ERROR_HARD;
break; // Not done, do error handling
}
failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
if ((failure_strategy == action_fail_ignore)
|| (failure_strategy == action_fail_restart_container
&& !strcmp(task, CRMD_ACTION_STOP))) {
crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s "
"succeeded " CRM_XS " rc=%d id=%s",
task, services_ocf_exitcode_str(rc),
(*exit_reason? ": " : ""), exit_reason, rsc->id,
pe__node_name(node), last_change_s, rc, ID(xml_op));
update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure,
on_fail, data_set);
crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
pe__set_resource_flags(rsc, pe_rsc_failure_ignored);
record_failed_op(xml_op, node, rsc, data_set);
if ((failure_strategy == action_fail_restart_container)
&& cmp_on_fail(*on_fail, action_fail_recover) <= 0) {
*on_fail = failure_strategy;
}
} else {
unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail,
data_set);
if (status == PCMK_EXEC_ERROR_HARD) {
do_crm_log(rc != PCMK_OCF_NOT_INSTALLED?LOG_ERR:LOG_NOTICE,
"Preventing %s from restarting on %s because "
"of hard failure (%s%s%s)" CRM_XS " rc=%d id=%s",
parent->id, pe__node_name(node),
services_ocf_exitcode_str(rc),
(*exit_reason? ": " : ""), exit_reason,
rc, ID(xml_op));
resource_location(parent, node, -INFINITY, "hard-error", data_set);
} else if (status == PCMK_EXEC_ERROR_FATAL) {
crm_err("Preventing %s from restarting anywhere because "
"of fatal failure (%s%s%s) " CRM_XS " rc=%d id=%s",
parent->id, services_ocf_exitcode_str(rc),
(*exit_reason? ": " : ""), exit_reason,
rc, ID(xml_op));
resource_location(parent, NULL, -INFINITY, "fatal-error", data_set);
}
}
done:
free(last_change_s);
pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s",
rsc->id, task, role2text(rsc->role),
role2text(rsc->next_role));
}
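/*!
 * \internal
 * \brief Set a node's built-in attributes and unpack its configured
 *        attributes and utilization from XML
 *
 * \param[in]     xml_obj   Node's XML from CIB configuration
 * \param[in,out] node      Node to set attributes for
 * \param[in]     overwrite Whether to overwrite existing attribute values
 * \param[in,out] data_set  Current working set
 */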
static void
add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite,
pe_working_set_t *data_set)
{
const char *cluster_name = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
.role = RSC_ROLE_UNKNOWN,
.now = data_set->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_UNAME), strdup(node->details->uname));
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID),
strdup(node->details->id));
if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) {
data_set->dc_node = node;
node->details->is_dc = TRUE;
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE));
} else {
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE));
}
cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
if (cluster_name) {
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME),
strdup(cluster_name));
}
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
node->details->attrs, NULL, overwrite, data_set);
+ pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
+ node->details->utilization, NULL,
+ FALSE, data_set);
+
if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
const char *site_name = pe_node_attribute_raw(node, "site-name");
if (site_name) {
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_SITE_NAME),
strdup(site_name));
} else if (cluster_name) {
/* Default to cluster-name if unset */
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_SITE_NAME),
strdup(cluster_name));
}
}
}
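/*!
 * \internal
 * \brief Extract a resource's operation history entries from status XML
 *
 * \param[in]     node          Name of node that history entries are for
 * \param[in]     rsc           ID of resource that history entries are for
 * \param[in,out] rsc_entry     Resource's lrm_resource XML from CIB status
 * \param[in]     active_filter Whether to return only active operations
 *
 * \return List of matching operation history XML (sorted by call ID when
 *         active_filter is false)
 */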
static GList *
extract_operations(const char *node, const char *rsc, xmlNode *rsc_entry,
                   gboolean active_filter)
{
int counter = -1;
int stop_index = -1;
int start_index = -1;
xmlNode *rsc_op = NULL;
GList *gIter = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
    /* extract operations */
for (rsc_op = pcmk__xe_first_child(rsc_entry);
rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP,
pcmk__str_none)) {
crm_xml_add(rsc_op, "resource", rsc);
crm_xml_add(rsc_op, XML_ATTR_UNAME, node);
op_list = g_list_prepend(op_list, rsc_op);
}
}
if (op_list == NULL) {
/* if there are no operations, there is nothing to do */
return NULL;
}
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
/* create active recurring operations as optional */
if (active_filter == FALSE) {
return sorted_op_list;
}
op_list = NULL;
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
xmlNode *rsc_op = (xmlNode *) gIter->data;
counter++;
if (start_index < stop_index) {
crm_trace("Skipping %s: not active", ID(rsc_entry));
break;
} else if (counter < start_index) {
crm_trace("Skipping %s: old", ID(rsc_op));
continue;
}
op_list = g_list_append(op_list, rsc_op);
}
g_list_free(sorted_op_list);
return op_list;
}
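/*!
 * \brief Find all operation history entries, optionally filtered
 *
 * \param[in]     rsc           If not NULL, include only this resource's entries
 * \param[in]     node          If not NULL, include only this node's entries
 * \param[in]     active_filter Whether to include only active operations
 * \param[in,out] data_set      Current working set
 *
 * \return List of matching operation history XML entries
 */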
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
pe_working_set_t * data_set)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
pe_node_t *this_node = NULL;
xmlNode *node_state = NULL;
for (node_state = pcmk__xe_first_child(status); node_state != NULL;
node_state = pcmk__xe_next(node_state)) {
if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
const char *uname = crm_element_value(node_state, XML_ATTR_UNAME);
if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
continue;
}
this_node = pe_find_node(data_set->nodes, uname);
            if (this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pe__is_guest_or_remote_node(this_node)) {
determine_remote_online_status(data_set, this_node);
} else {
determine_online_status(node_state, this_node, data_set);
}
if (this_node->details->online
|| pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
                /* Offline nodes run no resources, unless fencing is enabled,
                 * in which case we need to make sure resource start events
                 * happen after the fencing.
                 */
xmlNode *lrm_rsc = NULL;
tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);
for (lrm_rsc = pcmk__xe_first_child(tmp); lrm_rsc != NULL;
lrm_rsc = pcmk__xe_next(lrm_rsc)) {
if (pcmk__str_eq((const char *)lrm_rsc->name,
XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID);
if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
continue;
}
intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
output = g_list_concat(output, intermediate);
}
}
}
}
}
return output;
}
diff --git a/tools/attrd_updater.c b/tools/attrd_updater.c
index 2b5a462359..58edb2678a 100644
--- a/tools/attrd_updater.c
+++ b/tools/attrd_updater.c
@@ -1,465 +1,481 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define SUMMARY "query and update Pacemaker node attributes"
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
GError *error = NULL;
bool printed_values = false;
struct {
char command;
gchar *attr_dampen;
gchar *attr_name;
gchar *attr_node;
gchar *attr_set;
char *attr_value;
uint32_t attr_options;
gboolean query_all;
gboolean quiet;
} options = {
.attr_options = pcmk__node_attr_none,
.command = 'Q',
};
static gboolean
command_cb (const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
pcmk__str_update(&options.attr_value, optarg);
if (pcmk__str_any_of(option_name, "--update-both", "-B", NULL)) {
options.command = 'B';
} else if (pcmk__str_any_of(option_name, "--delete", "-D", NULL)) {
options.command = 'D';
} else if (pcmk__str_any_of(option_name, "--query", "-Q", NULL)) {
options.command = 'Q';
} else if (pcmk__str_any_of(option_name, "--refresh", "-R", NULL)) {
options.command = 'R';
} else if (pcmk__str_any_of(option_name, "--update", "-U", "-v", NULL)) {
options.command = 'U';
} else if (pcmk__str_any_of(option_name, "--update-delay", "-Y", NULL)) {
options.command = 'Y';
}
return TRUE;
}
static gboolean
private_cb (const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
pcmk__set_node_attr_flags(options.attr_options, pcmk__node_attr_private);
return TRUE;
}
static gboolean
section_cb (const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (pcmk__str_any_of(optarg, "nodes", "forever", NULL)) {
pcmk__set_node_attr_flags(options.attr_options, pcmk__node_attr_perm);
} else if (pcmk__str_any_of(optarg, "status", "reboot", NULL)) {
pcmk__clear_node_attr_flags(options.attr_options, pcmk__node_attr_perm);
} else {
g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_USAGE, "Unknown value for --lifetime: %s",
optarg);
return FALSE;
}
return TRUE;
}
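+/*!
+ * \brief Handle -z/--utilization by flagging the request as a utilization
+ *        attribute update
+ *
+ * Example usage (the attribute name and value are illustrative only):
+ *
+ *   attrd_updater --node myhost --utilization --name cpus --update 4
+ */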
+static gboolean
+attr_set_type_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
+ if (pcmk__str_any_of(option_name, "-z", "--utilization", NULL)) {
+ pcmk__set_node_attr_flags(options.attr_options, pcmk__node_attr_utilization);
+ }
+
+ return TRUE;
+}
+
static gboolean
wait_cb (const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
if (pcmk__str_eq(optarg, "no", pcmk__str_none)) {
pcmk__clear_node_attr_flags(options.attr_options, pcmk__node_attr_sync_local | pcmk__node_attr_sync_cluster);
return TRUE;
} else if (pcmk__str_eq(optarg, PCMK__VALUE_LOCAL, pcmk__str_none)) {
pcmk__clear_node_attr_flags(options.attr_options, pcmk__node_attr_sync_local | pcmk__node_attr_sync_cluster);
pcmk__set_node_attr_flags(options.attr_options, pcmk__node_attr_sync_local);
return TRUE;
} else if (pcmk__str_eq(optarg, PCMK__VALUE_CLUSTER, pcmk__str_none)) {
pcmk__clear_node_attr_flags(options.attr_options, pcmk__node_attr_sync_local | pcmk__node_attr_sync_cluster);
pcmk__set_node_attr_flags(options.attr_options, pcmk__node_attr_sync_cluster);
return TRUE;
} else {
g_set_error(err, PCMK__EXITC_ERROR, CRM_EX_USAGE,
"--wait= must be one of 'no', 'local', 'cluster'");
return FALSE;
}
}
#define INDENT " "
static GOptionEntry required_entries[] = {
{ "name", 'n', 0, G_OPTION_ARG_STRING, &options.attr_name,
"The attribute's name",
"NAME" },
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "update", 'U', 0, G_OPTION_ARG_CALLBACK, command_cb,
"Update attribute's value in pacemaker-attrd. If this causes the value\n"
INDENT "to change, it will also be updated in the cluster configuration.",
"VALUE" },
{ "update-both", 'B', 0, G_OPTION_ARG_CALLBACK, command_cb,
"Update attribute's value and time to wait (dampening) in\n"
INDENT "pacemaker-attrd. If this causes the value or dampening to change,\n"
INDENT "the attribute will also be written to the cluster configuration,\n"
INDENT "so be aware that repeatedly changing the dampening reduces its\n"
INDENT "effectiveness.\n"
INDENT "Requires -d/--delay",
"VALUE" },
{ "update-delay", 'Y', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Update attribute's dampening in pacemaker-attrd. If this causes\n"
INDENT "the dampening to change, the attribute will also be written\n"
INDENT "to the cluster configuration, so be aware that repeatedly\n"
INDENT "changing the dampening reduces its effectiveness.\n"
INDENT "Requires -d/--delay",
NULL },
{ "query", 'Q', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Query the attribute's value from pacemaker-attrd",
NULL },
{ "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"Unset attribute from pacemaker-attrd. At the moment, there is no way\n"
INDENT "to remove an attribute. This option will instead set its value\n"
INDENT "to the empty string.",
NULL },
{ "refresh", 'R', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, command_cb,
"(Advanced) Force the pacemaker-attrd daemon to resend all current\n"
INDENT "values to the CIB",
NULL },
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "delay", 'd', 0, G_OPTION_ARG_STRING, &options.attr_dampen,
"The time to wait (dampening) in seconds for further changes\n"
INDENT "before sending to the CIB",
"SECONDS" },
{ "set", 's', 0, G_OPTION_ARG_STRING, &options.attr_set,
"(Advanced) The attribute set in which to place the value",
"SET" },
{ "node", 'N', 0, G_OPTION_ARG_STRING, &options.attr_node,
"Set the attribute for the named node (instead of the local one)",
"NODE" },
{ "all", 'A', 0, G_OPTION_ARG_NONE, &options.query_all,
"Show values of the attribute for all nodes (query only)",
NULL },
{ "lifetime", 'l', 0, G_OPTION_ARG_CALLBACK, section_cb,
"(Not yet implemented) Lifetime of the node attribute (silently\n"
INDENT "ignored by cluster)",
"SECTION" },
{ "private", 'p', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, private_cb,
"If this creates a new attribute, never write the attribute to CIB",
NULL },
{ "wait", 'W', 0, G_OPTION_ARG_CALLBACK, wait_cb,
"Wait for some event to occur before returning. Values are 'no' (wait\n"
INDENT "only for the attribute daemon to acknowledge the request),\n"
INDENT "'local' (wait until the change has propagated to where a local\n"
INDENT "query will return the request value, or the value set by a\n"
INDENT "later request), or 'cluster' (wait until the change has propagated\n"
INDENT "to where a query anywhere on the cluster will return the requested\n"
INDENT "value, or the value set by a later request). Default is 'no'.",
"UNTIL" },
+ { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, attr_set_type_cb,
+ "When creating a new attribute, create it as a node utilization attribute\n"
+ INDENT "instead of an instance attribute. If the attribute already exists,\n"
+ INDENT "its existing type (utilization vs. instance) will be used regardless.\n"
+ INDENT "(with -B, -U, -Y)",
+ NULL },
+
{ NULL }
};
static GOptionEntry deprecated_entries[] = {
{ "quiet", 'q', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.quiet,
NULL,
NULL },
{ "update", 'v', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, command_cb,
NULL,
NULL },
{ "section", 'S', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, section_cb,
NULL,
NULL },
{ NULL }
};
static int send_attrd_query(pcmk__output_t *out, const char *attr_name, const char *attr_node,
gboolean query_all);
static int send_attrd_update(char command, const char *attr_node, const char *attr_name,
const char *attr_value, const char *attr_set,
const char *attr_dampen, uint32_t attr_options);
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
pcmk__add_arg_group(context, "required", "Required Arguments:",
"Show required arguments", required_entries);
pcmk__add_arg_group(context, "command", "Command:",
"Show command options (mutually exclusive)", command_entries);
pcmk__add_arg_group(context, "additional", "Additional Options:",
"Show additional options", addl_entries);
pcmk__add_arg_group(context, "deprecated", "Deprecated Options:",
"Show deprecated options", deprecated_entries);
return context;
}
int
main(int argc, char **argv)
{
int rc = pcmk_rc_ok;
crm_exit_t exit_code = CRM_EX_OK;
pcmk__output_t *out = NULL;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
GOptionContext *context = build_arg_context(args, &output_group);
gchar **processed_args = pcmk__cmdline_preproc(argv, "dlnsvBNUS");
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("attrd_updater", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s",
args->output_ty, pcmk_rc_str(rc));
goto done;
}
if (args->version) {
out->version(out, false);
goto done;
}
if (options.command != 'R' && options.attr_name == NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Command requires --name argument");
goto done;
} else if ((options.command == 'B'|| options.command == 'Y') && options.attr_dampen == NULL) {
out->info(out, "Warning: '%c' command given without required --delay", options.command);
}
pcmk__register_lib_messages(out);
if (options.command == 'Q') {
int rc = send_attrd_query(out, options.attr_name, options.attr_node, options.query_all);
exit_code = pcmk_rc2exitc(rc);
} else {
/* @TODO We don't know whether the specified node is a Pacemaker Remote
* node or not, so we can't set pcmk__node_attr_remote when appropriate.
* However, it's not a big problem, because pacemaker-attrd will learn
* and remember a node's "remoteness".
*/
int rc = send_attrd_update(options.command, options.attr_node,
options.attr_name, options.attr_value,
options.attr_set, options.attr_dampen,
options.attr_options);
exit_code = pcmk_rc2exitc(rc);
}
done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
g_free(options.attr_dampen);
g_free(options.attr_name);
g_free(options.attr_node);
g_free(options.attr_set);
free(options.attr_value);
pcmk__output_and_clear_error(error, out);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
crm_exit(exit_code);
}
/*!
* \brief Print the attribute values in a pacemaker-attrd XML query reply
*
 * \param[in,out] out   Output object to print with
 * \param[in]     reply List of attribute name/value pairs
 *
 * \note Sets the global printed_values to true if any values were printed
*/
static void
print_attrd_values(pcmk__output_t *out, GList *reply)
{
for (GList *iter = reply; iter != NULL; iter = iter->next) {
pcmk__attrd_query_pair_t *pair = (pcmk__attrd_query_pair_t *) iter->data;
out->message(out, "attribute", NULL, NULL, pair->name, pair->value,
pair->node);
printed_values = true;
}
}
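/*!
 * \brief Handle events from the pacemaker-attrd IPC connection
 *
 * Replies to queries are printed; all other events are ignored.
 *
 * \param[in] attrd_api  Attribute manager IPC connection
 * \param[in] event_type Type of event that occurred
 * \param[in] status     Event status
 * \param[in] event_data Event-specific data (the reply, for reply events)
 * \param[in] user_data  Output object passed when the callback was registered
 */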
static void
attrd_event_cb(pcmk_ipc_api_t *attrd_api, enum pcmk_ipc_event event_type,
crm_exit_t status, void *event_data, void *user_data)
{
pcmk__output_t *out = (pcmk__output_t *) user_data;
pcmk__attrd_api_reply_t *reply = event_data;
if (event_type != pcmk_ipc_event_reply || status != CRM_EX_OK) {
return;
}
/* Print the values from the reply. */
if (reply->reply_type == pcmk__attrd_reply_query) {
print_attrd_values(out, reply->data.pairs);
}
}
/*!
* \brief Submit a query to pacemaker-attrd and print reply
*
 * \param[in,out] out       Output object to print reply with
 * \param[in]     attr_name Name of attribute to be affected by request
 * \param[in]     attr_node Name of host to query for (or NULL for localhost)
 * \param[in]     query_all If TRUE, ignore attr_node and query all nodes instead
*
* \return Standard Pacemaker return code
*/
static int
send_attrd_query(pcmk__output_t *out, const char *attr_name, const char *attr_node, gboolean query_all)
{
pcmk_ipc_api_t *attrd_api = NULL;
int rc = pcmk_rc_ok;
// Create attrd IPC object
rc = pcmk_new_ipc_api(&attrd_api, pcmk_ipc_attrd);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not connect to attrd: %s", pcmk_rc_str(rc));
return ENOTCONN;
}
pcmk_register_ipc_callback(attrd_api, attrd_event_cb, out);
// Connect to attrd (without main loop)
rc = pcmk_connect_ipc(attrd_api, pcmk_ipc_dispatch_sync);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not connect to attrd: %s", pcmk_rc_str(rc));
pcmk_free_ipc_api(attrd_api);
return rc;
}
/* Decide which node(s) to query */
if (query_all == TRUE) {
attr_node = NULL;
}
rc = pcmk__attrd_api_query(attrd_api, attr_node, attr_name, 0);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not query value of %s: %s (%d)",
attr_name, pcmk_strerror(rc), rc);
} else if (!printed_values) {
rc = pcmk_rc_schema_validation;
g_set_error(&error, PCMK__RC_ERROR, rc,
"Could not query value of %s: attribute does not exist", attr_name);
}
pcmk_disconnect_ipc(attrd_api);
pcmk_free_ipc_api(attrd_api);
return rc;
}
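/*!
 * \brief Submit an update request to pacemaker-attrd
 *
 * \param[in] command      Command to send ('B', 'D', 'R', 'U', or 'Y')
 * \param[in] attr_node    Name of host to update (or NULL for localhost)
 * \param[in] attr_name    Name of attribute to be affected by request
 * \param[in] attr_value   New attribute value (for value updates)
 * \param[in] attr_set     ID of attribute set to use (or NULL for default)
 * \param[in] attr_dampen  New dampening value (for dampening updates)
 * \param[in] attr_options Group of enum pcmk__node_attr_opts flags
 *
 * \return Standard Pacemaker return code
 */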
static int
send_attrd_update(char command, const char *attr_node, const char *attr_name,
const char *attr_value, const char *attr_set,
const char *attr_dampen, uint32_t attr_options)
{
int rc = pcmk_rc_ok;
switch (command) {
case 'B':
rc = pcmk__attrd_api_update(NULL, attr_node, attr_name, attr_value,
attr_dampen, attr_set, NULL,
attr_options | pcmk__node_attr_value | pcmk__node_attr_delay);
break;
case 'D':
rc = pcmk__attrd_api_delete(NULL, attr_node, attr_name, attr_options);
break;
case 'R':
rc = pcmk__attrd_api_refresh(NULL, attr_node);
break;
case 'U':
rc = pcmk__attrd_api_update(NULL, attr_node, attr_name, attr_value,
NULL, attr_set, NULL,
attr_options | pcmk__node_attr_value);
break;
case 'Y':
rc = pcmk__attrd_api_update(NULL, attr_node, attr_name, NULL,
attr_dampen, attr_set, NULL,
attr_options | pcmk__node_attr_delay);
break;
}
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not update %s=%s: %s (%d)",
attr_name, attr_value, pcmk_rc_str(rc), rc);
}
return rc;
}
diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c
index b5159ab0eb..8c6b6a7ab3 100644
--- a/tools/crm_attribute.c
+++ b/tools/crm_attribute.c
@@ -1,848 +1,852 @@
/*
* Copyright 2004-2022 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
* This source code is licensed under the GNU General Public License version 2
* or later (GPLv2+) WITHOUT ANY WARRANTY.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define SUMMARY "crm_attribute - query and update Pacemaker cluster options and node attributes"
GError *error = NULL;
crm_exit_t exit_code = CRM_EX_OK;
uint64_t cib_opts = cib_sync_call;
PCMK__OUTPUT_ARGS("attribute", "const char *", "const char *", "const char *",
"const char *", "const char *")
static int
attribute_text(pcmk__output_t *out, va_list args)
{
const char *scope = va_arg(args, const char *);
const char *instance = va_arg(args, const char *);
const char *name = va_arg(args, const char *);
const char *value = va_arg(args, const char *);
const char *host G_GNUC_UNUSED = va_arg(args, const char *);
if (out->quiet) {
if (value != NULL) {
pcmk__formatted_printf(out, "%s\n", value);
}
} else {
out->info(out, "%s%s %s%s %s%s value=%s",
scope ? "scope=" : "", scope ? scope : "",
instance ? "id=" : "", instance ? instance : "",
name ? "name=" : "", name ? name : "",
value ? value : "(null)");
}
return pcmk_rc_ok;
}
static pcmk__supported_format_t formats[] = {
PCMK__SUPPORTED_FORMAT_NONE,
PCMK__SUPPORTED_FORMAT_TEXT,
PCMK__SUPPORTED_FORMAT_XML,
{ NULL, NULL, NULL }
};
static pcmk__message_entry_t fmt_functions[] = {
{ "attribute", "text", attribute_text },
{ NULL, NULL, NULL }
};
struct {
char command;
gchar *attr_default;
gchar *attr_id;
gchar *attr_name;
gchar *attr_pattern;
char *attr_value;
char *dest_node;
gchar *dest_uname;
gboolean inhibit;
gchar *set_name;
char *set_type;
gchar *type;
gboolean promotion_score;
} options = {
.command = 'G',
.promotion_score = FALSE
};
#define INDENT " "
static gboolean
delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.command = 'D';
pcmk__str_update(&options.attr_value, NULL);
return TRUE;
}
static gboolean
promotion_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
char *score_name = NULL;
options.promotion_score = TRUE;
if (options.attr_name) {
g_free(options.attr_name);
}
score_name = pcmk_promotion_score_name(optarg);
if (score_name != NULL) {
options.attr_name = g_strdup(score_name);
free(score_name);
} else {
options.attr_name = NULL;
}
return TRUE;
}
static gboolean
update_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.command = 'u';
pcmk__str_update(&options.attr_value, optarg);
return TRUE;
}
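/*!
 * \brief Handle -z/--utilization: operate on the nodes section, using
 *        utilization sets instead of instance attributes
 *
 * Example usage (the attribute name and value are illustrative only):
 *
 *   crm_attribute --node myhost --utilization --name cpus --update 4
 */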
static gboolean
utilization_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (options.type) {
g_free(options.type);
}
options.type = g_strdup(XML_CIB_TAG_NODES);
pcmk__str_update(&options.set_type, XML_TAG_UTILIZATION);
return TRUE;
}
static gboolean
value_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
options.command = 'G';
pcmk__str_update(&options.attr_value, NULL);
return TRUE;
}
static GOptionEntry selecting_entries[] = {
{ "id", 'i', 0, G_OPTION_ARG_STRING, &options.attr_id,
"(Advanced) Operate on instance of specified attribute with this\n"
INDENT "XML ID",
"XML_ID"
},
{ "name", 'n', 0, G_OPTION_ARG_STRING, &options.attr_name,
"Operate on attribute or option with this name. For queries, this\n"
INDENT "is optional, in which case all matching attributes will be\n"
INDENT "returned.",
"NAME"
},
{ "pattern", 'P', 0, G_OPTION_ARG_STRING, &options.attr_pattern,
"Operate on all attributes matching this pattern\n"
INDENT "(with -G, or with -v/-D and -l reboot)",
"PATTERN"
},
{ "promotion", 'p', G_OPTION_FLAG_OPTIONAL_ARG, G_OPTION_ARG_CALLBACK, promotion_cb,
"Operate on node attribute used as promotion score for specified\n"
INDENT "resource, or resource given in OCF_RESOURCE_INSTANCE environment\n"
INDENT "variable if none is specified; this also defaults -l/--lifetime\n"
INDENT "to reboot (normally invoked from an OCF resource agent)",
"RESOURCE"
},
{ "set-name", 's', 0, G_OPTION_ARG_STRING, &options.set_name,
"(Advanced) Operate on instance of specified attribute that is\n"
INDENT "within set with this XML ID",
"NAME"
},
{ NULL }
};
static GOptionEntry command_entries[] = {
{ "delete", 'D', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, delete_cb,
"Delete the attribute/option",
NULL
},
{ "query", 'G', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, value_cb,
"Query the current value of the attribute/option.\n"
INDENT "See also: -n, -P",
NULL
},
{ "update", 'v', 0, G_OPTION_ARG_CALLBACK, update_cb,
"Update the value of the attribute/option",
"VALUE"
},
{ NULL }
};
static GOptionEntry addl_entries[] = {
{ "default", 'd', 0, G_OPTION_ARG_STRING, &options.attr_default,
"(Advanced) Default value to display if none is found in configuration",
"VALUE"
},
{ "lifetime", 'l', 0, G_OPTION_ARG_STRING, &options.type,
"Lifetime of the node attribute.\n"
INDENT "Valid values: reboot, forever",
"LIFETIME"
},
{ "node", 'N', 0, G_OPTION_ARG_STRING, &options.dest_uname,
"Set a node attribute for named node (instead of a cluster option).\n"
INDENT "See also: -l",
"NODE"
},
{ "type", 't', 0, G_OPTION_ARG_STRING, &options.type,
"Which part of the configuration to update/delete/query the option in.\n"
INDENT "Valid values: crm_config, rsc_defaults, op_defaults, tickets",
"SECTION"
},
    { "utilization", 'z', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, utilization_cb,
      "Set a utilization attribute for the node.",
NULL
},
{ "inhibit-policy-engine", '!', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &options.inhibit,
NULL, NULL
},
{ NULL }
};
static GOptionEntry deprecated_entries[] = {
{ "attr-id", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.attr_id,
NULL, NULL
},
{ "attr-name", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.attr_name,
NULL, NULL
},
{ "attr-value", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, update_cb,
NULL, NULL
},
{ "delete-attr", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_CALLBACK, delete_cb,
NULL, NULL
},
{ "get-value", 0, G_OPTION_FLAG_HIDDEN|G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK, value_cb,
NULL, NULL
},
{ "node-uname", 'U', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_STRING, &options.dest_uname,
NULL, NULL
},
{ NULL }
};
static void
get_node_name_from_local(void)
{
char *hostname = pcmk_hostname();
g_free(options.dest_uname);
/* This silliness is so that dest_uname is always a glib-managed
* string so we know how to free it later. pcmk_hostname returns
* a newly allocated string via strdup.
*/
options.dest_uname = g_strdup(hostname);
free(hostname);
}
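/*!
 * \brief Submit a delete or update request to pacemaker-attrd
 *
 * \param[in] command      Command to send ('D' to delete, 'u' to update)
 * \param[in] attr_node    Name of host to update
 * \param[in] attr_name    Name of attribute to be affected by request
 * \param[in] attr_value   New attribute value (for updates)
 * \param[in] attr_set     ID of attribute set to use (or NULL for default)
 * \param[in] attr_dampen  Unused here (always passed as NULL)
 * \param[in] attr_options Group of enum pcmk__node_attr_opts flags
 *
 * \note If options.attr_pattern is set, pcmk__node_attr_pattern is added to
 *       the request options.
 *
 * \return Standard Pacemaker return code
 */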
static int
send_attrd_update(char command, const char *attr_node, const char *attr_name,
const char *attr_value, const char *attr_set,
const char *attr_dampen, uint32_t attr_options)
{
int rc = pcmk_rc_ok;
uint32_t opts = attr_options;
if (options.attr_pattern) {
opts |= pcmk__node_attr_pattern;
}
switch (command) {
case 'D':
rc = pcmk__attrd_api_delete(NULL, attr_node, attr_name, opts);
break;
case 'u':
rc = pcmk__attrd_api_update(NULL, attr_node, attr_name,
attr_value, NULL, attr_set, NULL,
opts | pcmk__node_attr_value);
break;
}
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not update %s=%s: %s (%d)",
attr_name, attr_value, pcmk_rc_str(rc), rc);
}
return rc;
}
struct delete_data_s {
pcmk__output_t *out;
cib_t *cib;
};
static int
delete_attr_on_node(xmlNode *child, void *userdata)
{
struct delete_data_s *dd = (struct delete_data_s *) userdata;
const char *attr_name = crm_element_value(child, XML_NVPAIR_ATTR_NAME);
int rc = pcmk_rc_ok;
if (!pcmk__str_eq(attr_name, options.attr_pattern, pcmk__str_regex)) {
return pcmk_rc_ok;
}
rc = cib__delete_node_attr(dd->out, dd->cib, cib_opts, options.type,
options.dest_node, options.set_type,
options.set_name, options.attr_id,
attr_name, options.attr_value, NULL);
if (rc == ENXIO) {
rc = pcmk_rc_ok;
}
return rc;
}
static int
command_delete(pcmk__output_t *out, cib_t *cib)
{
int rc = pcmk_rc_ok;
xmlNode *result = NULL;
bool use_pattern = options.attr_pattern != NULL;
/* See the comment in command_query regarding xpath and regular expressions. */
if (use_pattern) {
struct delete_data_s dd = { out, cib };
rc = cib__get_node_attrs(out, cib, options.type, options.dest_node,
options.set_type, options.set_name, NULL, NULL,
NULL, &result);
if (rc != pcmk_rc_ok) {
goto done_deleting;
}
rc = pcmk__xe_foreach_child(result, NULL, delete_attr_on_node, &dd);
if (rc != pcmk_rc_ok) {
goto done_deleting;
}
} else {
rc = cib__delete_node_attr(out, cib, cib_opts, options.type, options.dest_node,
options.set_type, options.set_name, options.attr_id,
options.attr_name, options.attr_value, NULL);
}
done_deleting:
free_xml(result);
if (rc == ENXIO) {
/* Nothing to delete...
* which means it's not there...
* which is what the admin wanted
*/
rc = pcmk_rc_ok;
}
return rc;
}
struct update_data_s {
pcmk__output_t *out;
cib_t *cib;
int is_remote_node;
};
static int
update_attr_on_node(xmlNode *child, void *userdata)
{
struct update_data_s *ud = (struct update_data_s *) userdata;
const char *attr_name = crm_element_value(child, XML_NVPAIR_ATTR_NAME);
if (!pcmk__str_eq(attr_name, options.attr_pattern, pcmk__str_regex)) {
return pcmk_rc_ok;
}
return cib__update_node_attr(ud->out, ud->cib, cib_opts, options.type,
options.dest_node, options.set_type,
options.set_name, options.attr_id,
attr_name, options.attr_value, NULL,
ud->is_remote_node ? "remote" : NULL);
}
static int
command_update(pcmk__output_t *out, cib_t *cib, int is_remote_node)
{
int rc = pcmk_rc_ok;
xmlNode *result = NULL;
bool use_pattern = options.attr_pattern != NULL;
CRM_LOG_ASSERT(options.type != NULL);
CRM_LOG_ASSERT(options.attr_name != NULL);
CRM_LOG_ASSERT(options.attr_value != NULL);
/* See the comment in command_query regarding xpath and regular expressions. */
if (use_pattern) {
struct update_data_s ud = { out, cib, is_remote_node };
rc = cib__get_node_attrs(out, cib, options.type, options.dest_node,
options.set_type, options.set_name, NULL, NULL,
NULL, &result);
if (rc != pcmk_rc_ok) {
goto done_updating;
}
rc = pcmk__xe_foreach_child(result, NULL, update_attr_on_node, &ud);
if (rc != pcmk_rc_ok) {
goto done_updating;
}
} else {
rc = cib__update_node_attr(out, cib, cib_opts, options.type,
options.dest_node, options.set_type,
options.set_name, options.attr_id,
options.attr_name, options.attr_value,
NULL, is_remote_node ? "remote" : NULL);
}
done_updating:
free_xml(result);
return rc;
}
struct output_data_s {
pcmk__output_t *out;
bool use_pattern;
bool did_output;
};
static int
output_one_attribute(xmlNode *node, void *userdata)
{
struct output_data_s *od = (struct output_data_s *) userdata;
const char *name = crm_element_value(node, XML_NVPAIR_ATTR_NAME);
const char *value = crm_element_value(node, XML_NVPAIR_ATTR_VALUE);
const char *host = crm_element_value(node, PCMK__XA_ATTR_NODE_NAME);
const char *type = options.type;
const char *attr_id = options.attr_id;
if (od->use_pattern && !pcmk__str_eq(name, options.attr_pattern, pcmk__str_regex)) {
return pcmk_rc_ok;
}
od->out->message(od->out, "attribute", type, attr_id, name, value, host);
od->did_output = true;
crm_info("Read %s='%s' %s%s",
pcmk__s(name, ""), pcmk__s(value, ""),
options.set_name ? "in " : "", options.set_name ? options.set_name : "");
return pcmk_rc_ok;
}
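/*!
 * \brief Query node attributes or cluster options from the CIB and print them
 *
 * \param[in,out] out Output object to print with
 * \param[in,out] cib CIB connection to query
 *
 * \return Standard Pacemaker return code
 */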
static int
command_query(pcmk__output_t *out, cib_t *cib)
{
int rc = pcmk_rc_ok;
xmlNode *result = NULL;
bool use_pattern = options.attr_pattern != NULL;
/* libxml2 doesn't support regular expressions in xpath queries (which is how
* cib__get_node_attrs -> find_attr finds attributes). So instead, we'll just
* find all the attributes for a given node here by passing NULL for attr_id
* and attr_name, and then later see if they match the given pattern.
*/
if (use_pattern) {
rc = cib__get_node_attrs(out, cib, options.type, options.dest_node,
options.set_type, options.set_name, NULL,
NULL, NULL, &result);
} else {
rc = cib__get_node_attrs(out, cib, options.type, options.dest_node,
options.set_type, options.set_name, options.attr_id,
options.attr_name, NULL, &result);
}
if (rc == ENXIO && options.attr_default) {
/* Make static analysis happy */
const char *type = options.type;
const char *attr_id = options.attr_id;
const char *attr_name = options.attr_name;
const char *attr_default = options.attr_default;
const char *dest_uname = options.dest_uname;
out->message(out, "attribute", type, attr_id, attr_name, attr_default,
dest_uname);
rc = pcmk_rc_ok;
} else if (rc != pcmk_rc_ok) {
// Don't do anything.
} else if (xml_has_children(result)) {
struct output_data_s od = { out, use_pattern, false };
pcmk__xe_foreach_child(result, NULL, output_one_attribute, &od);
if (!od.did_output) {
rc = ENXIO;
}
} else {
struct output_data_s od = { out, use_pattern, false };
output_one_attribute(result, &od);
}
free_xml(result);
return rc;
}
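/*!
 * \brief Ensure options.type is set to a CIB section name
 *
 * If no type was given on the command line, infer one from the other options;
 * otherwise, map the "reboot" and "forever" lifetimes to their CIB sections.
 */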
static void
set_type(void)
{
if (options.type == NULL) {
if (options.promotion_score) {
// Updating a promotion score node attribute
options.type = g_strdup(XML_CIB_TAG_STATUS);
} else if (options.dest_uname != NULL) {
// Updating some other node attribute
options.type = g_strdup(XML_CIB_TAG_NODES);
} else {
// Updating cluster options
options.type = g_strdup(XML_CIB_TAG_CRMCONFIG);
}
    } else if (pcmk__str_eq(options.type, "reboot", pcmk__str_casei)) {
        // Free the user-supplied lifetime before replacing it, to avoid a leak
        g_free(options.type);
        options.type = g_strdup(XML_CIB_TAG_STATUS);
    } else if (pcmk__str_eq(options.type, "forever", pcmk__str_casei)) {
        g_free(options.type);
        options.type = g_strdup(XML_CIB_TAG_NODES);
    }
}
static bool
use_attrd(void)
{
/* Only go through the attribute manager for transient attributes, and
* then only if we're not using a file as the CIB.
*/
return pcmk__str_eq(options.type, XML_CIB_TAG_STATUS, pcmk__str_casei) &&
getenv("CIB_file") == NULL && getenv("CIB_shadow") == NULL;
}
static bool
try_ipc_update(void)
{
return use_attrd() && (options.command == 'D' || options.command == 'u');
}
static bool
pattern_used_correctly(void)
{
/* --pattern can only be used with:
* -G (query), or
* -v (update) or -D (delete), with till-reboot
*/
return options.command == 'G' ||
((options.command == 'u' || options.command == 'D') &&
pcmk__str_eq(options.type, XML_CIB_TAG_STATUS, pcmk__str_casei));
}
static bool
delete_used_correctly(void)
{
return options.command != 'D' || options.attr_name != NULL || options.attr_pattern != NULL;
}
static GOptionContext *
build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
{ "quiet", 'q', 0, G_OPTION_ARG_NONE, &(args->quiet),
"Print only the value on stdout",
NULL },
{ "quiet", 'Q', G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &(args->quiet),
NULL, NULL
},
{ NULL }
};
    const char *description = "Examples:\n\n"
        "Add a new node attribute called 'location' with the value 'office' for host 'myhost':\n\n"
        "\tcrm_attribute --node myhost --name location --update office\n\n"
        "Query the value of the 'location' node attribute for host 'myhost':\n\n"
        "\tcrm_attribute --node myhost --name location --query\n\n"
        "Change the value of the 'location' node attribute for host 'myhost':\n\n"
        "\tcrm_attribute --node myhost --name location --update backoffice\n\n"
        "Delete the 'location' node attribute for host 'myhost':\n\n"
        "\tcrm_attribute --node myhost --name location --delete\n\n"
        "Query the value of the 'cluster-delay' cluster option:\n\n"
        "\tcrm_attribute --type crm_config --name cluster-delay --query\n\n"
        "Query the value of the 'cluster-delay' cluster option and print only the value:\n\n"
        "\tcrm_attribute --type crm_config --name cluster-delay --query --quiet\n\n";
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
pcmk__add_main_args(context, extra_prog_entries);
g_option_context_set_description(context, description);
pcmk__add_arg_group(context, "selections", "Selecting attributes:",
"Show selecting options", selecting_entries);
pcmk__add_arg_group(context, "command", "Commands:",
"Show command options", command_entries);
pcmk__add_arg_group(context, "additional", "Additional options:",
"Show additional options", addl_entries);
pcmk__add_arg_group(context, "deprecated", "Deprecated Options:",
"Show deprecated options", deprecated_entries);
return context;
}
int
main(int argc, char **argv)
{
cib_t *the_cib = NULL;
int is_remote_node = 0;
int attrd_opts = pcmk__node_attr_none;
int rc = pcmk_rc_ok;
pcmk__output_t *out = NULL;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
gchar **processed_args = pcmk__cmdline_preproc(argv, "NPUdilnpstv");
GOptionContext *context = build_arg_context(args, &output_group);
pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
}
pcmk__cli_init_logging("crm_attribute", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_ERROR;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s",
args->output_ty, pcmk_rc_str(rc));
goto done;
}
pcmk__register_lib_messages(out);
pcmk__register_messages(out, fmt_functions);
if (args->version) {
out->version(out, false);
goto done;
}
out->quiet = args->quiet;
if (options.promotion_score && options.attr_name == NULL) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"-p/--promotion must be called from an OCF resource agent "
"or with a resource ID specified");
goto done;
}
if (options.inhibit) {
crm_warn("Inhibiting notifications for this update");
cib__set_call_options(cib_opts, crm_system_name, cib_inhibit_notify);
}
the_cib = cib_new();
rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not connect to the CIB: %s", pcmk_rc_str(rc));
goto done;
}
set_type();
// Use default node if not given (except for cluster options and tickets)
if (!pcmk__strcase_any_of(options.type, XML_CIB_TAG_CRMCONFIG, XML_CIB_TAG_TICKETS,
NULL)) {
/* If we are being called from a resource agent via the cluster,
* the correct local node name will be passed as an environment
* variable. Otherwise, we have to ask the cluster.
*/
const char *target = pcmk__node_attr_target(options.dest_uname);
if (target != NULL) {
g_free(options.dest_uname);
options.dest_uname = g_strdup(target);
} else if (getenv("CIB_file") != NULL && options.dest_uname == NULL) {
get_node_name_from_local();
}
if (options.dest_uname == NULL) {
char *node_name = NULL;
rc = pcmk__query_node_name(out, 0, &node_name, 0);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
free(node_name);
goto done;
}
options.dest_uname = g_strdup(node_name);
free(node_name);
}
rc = query_node_uuid(the_cib, options.dest_uname, &options.dest_node, &is_remote_node);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Could not map name=%s to a UUID", options.dest_uname);
goto done;
}
}
if (!delete_used_correctly()) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error: must specify attribute name or pattern to delete");
goto done;
}
if (options.attr_pattern) {
if (!pattern_used_correctly()) {
exit_code = CRM_EX_USAGE;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error: pattern can only be used with query, or with "
"till-reboot update or delete");
goto done;
}
g_free(options.attr_name);
options.attr_name = options.attr_pattern;
}
if (is_remote_node) {
attrd_opts = pcmk__node_attr_remote;
}
+ if (pcmk__str_eq(options.set_type, XML_TAG_UTILIZATION, pcmk__str_none)) {
+ attrd_opts |= pcmk__node_attr_utilization;
+ }
+
if (try_ipc_update() &&
(send_attrd_update(options.command, options.dest_uname, options.attr_name,
options.attr_value, options.set_name, NULL, attrd_opts) == pcmk_rc_ok)) {
crm_info("Update %s=%s sent via pacemaker-attrd",
options.attr_name, ((options.command == 'D')? "" : options.attr_value));
} else if (options.command == 'D') {
rc = command_delete(out, the_cib);
} else if (options.command == 'u') {
rc = command_update(out, the_cib, is_remote_node);
} else {
rc = command_query(out, the_cib);
}
if (rc == ENOTUNIQ) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Please choose from one of the matches below and supply the 'id' with --attr-id");
} else if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
"Error performing operation: %s", pcmk_strerror(rc));
}
done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
free(options.attr_default);
g_free(options.attr_id);
g_free(options.attr_name);
free(options.attr_value);
free(options.dest_node);
g_free(options.dest_uname);
g_free(options.set_name);
free(options.set_type);
g_free(options.type);
cib__clean_up_connection(&the_cib);
pcmk__output_and_clear_error(error, out);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
}
pcmk__unregister_formats();
return crm_exit(exit_code);
}