diff --git a/daemons/controld/controld_te_utils.c b/daemons/controld/controld_te_utils.c index c0c7bea1ba..616119a170 100644 --- a/daemons/controld/controld_te_utils.c +++ b/daemons/controld/controld_te_utils.c @@ -1,509 +1,509 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include //! Triggers transition graph processing static crm_trigger_t *transition_trigger = NULL; static GHashTable *node_pending_timers = NULL; gboolean stop_te_timer(pcmk__graph_action_t *action) { if (action == NULL) { return FALSE; } if (action->timer != 0) { crm_trace("Stopping action timer"); g_source_remove(action->timer); action->timer = 0; } else { crm_trace("Action timer was already stopped"); return FALSE; } return TRUE; } static gboolean te_graph_trigger(gpointer user_data) { if (controld_globals.transition_graph == NULL) { crm_debug("Nothing to do"); return TRUE; } crm_trace("Invoking graph %d in state %s", controld_globals.transition_graph->id, fsa_state2string(controld_globals.fsa_state)); switch (controld_globals.fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: return TRUE; default: break; } if (!controld_globals.transition_graph->complete) { enum pcmk__graph_status graph_rc; int orig_limit = controld_globals.transition_graph->batch_limit; int throttled_limit = throttle_get_total_job_limit(orig_limit); controld_globals.transition_graph->batch_limit = throttled_limit; graph_rc = pcmk__execute_graph(controld_globals.transition_graph); controld_globals.transition_graph->batch_limit = orig_limit; if (graph_rc == pcmk__graph_active) { crm_trace("Transition not yet complete"); return TRUE; } else if (graph_rc == pcmk__graph_pending) { crm_trace("Transition not yet complete - no actions fired"); return TRUE; } if (graph_rc != pcmk__graph_complete) { crm_warn("Transition failed: %s", pcmk__graph_status2text(graph_rc)); pcmk__log_graph(LOG_NOTICE, controld_globals.transition_graph); } } crm_debug("Transition %d is now complete", controld_globals.transition_graph->id); controld_globals.transition_graph->complete = true; notify_crmd(controld_globals.transition_graph); return TRUE; } /*! * \internal * \brief Initialize transition trigger */ void controld_init_transition_trigger(void) { transition_trigger = mainloop_add_trigger(G_PRIORITY_LOW, te_graph_trigger, NULL); } /*! * \internal * \brief Destroy transition trigger */ void controld_destroy_transition_trigger(void) { mainloop_destroy_trigger(transition_trigger); transition_trigger = NULL; } void controld_trigger_graph_as(const char *fn, int line) { crm_trace("%s:%d - Triggered graph processing", fn, line); mainloop_set_trigger(transition_trigger); } static struct abort_timer_s { bool aborted; guint id; int priority; enum pcmk__graph_next action; const char *text; } abort_timer = { 0, }; static gboolean abort_timer_popped(gpointer data) { struct abort_timer_s *abort_timer = (struct abort_timer_s *) data; if (AM_I_DC && (abort_timer->aborted == FALSE)) { abort_transition(abort_timer->priority, abort_timer->action, abort_timer->text, NULL); } abort_timer->id = 0; return FALSE; // do not immediately reschedule timer } /*! 
* \internal * \brief Abort transition after delay, if not already aborted in that time * * \param[in] abort_text Must be literal string */ void abort_after_delay(int abort_priority, enum pcmk__graph_next abort_action, const char *abort_text, guint delay_ms) { if (abort_timer.id) { // Timer already in progress, stop and reschedule g_source_remove(abort_timer.id); } abort_timer.aborted = FALSE; abort_timer.priority = abort_priority; abort_timer.action = abort_action; abort_timer.text = abort_text; abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, &abort_timer); } static void free_node_pending_timer(gpointer data) { struct abort_timer_s *node_pending_timer = (struct abort_timer_s *) data; if (node_pending_timer->id != 0) { g_source_remove(node_pending_timer->id); node_pending_timer->id = 0; } free(node_pending_timer); } static gboolean node_pending_timer_popped(gpointer key) { struct abort_timer_s *node_pending_timer = NULL; if (node_pending_timers == NULL) { return FALSE; } node_pending_timer = g_hash_table_lookup(node_pending_timers, key); if (node_pending_timer == NULL) { return FALSE; } - crm_warn("Node with id '%s' pending timed out (%us) on joining the process " - "group", + crm_warn("Node with " PCMK_XA_ID " '%s' pending timed out (%us) " + "on joining the process group", (const char *) key, controld_globals.node_pending_timeout); if (controld_globals.node_pending_timeout > 0) { abort_timer_popped(node_pending_timer); } g_hash_table_remove(node_pending_timers, key); return FALSE; // do not reschedule timer } static void init_node_pending_timer(const crm_node_t *node, guint timeout) { struct abort_timer_s *node_pending_timer = NULL; char *key = NULL; if (node->uuid == NULL) { return; } if (node_pending_timers == NULL) { node_pending_timers = pcmk__strikey_table(free, free_node_pending_timer); // The timer is somehow already existing } else if (g_hash_table_lookup(node_pending_timers, node->uuid) != NULL) { return; } - crm_notice("Waiting for pending %s with id '%s' to join the process " - "group (timeout=%us)", + crm_notice("Waiting for pending %s with " PCMK_XA_ID " '%s' " + "to join the process group (timeout=%us)", node->uname ? node->uname : "node", node->uuid, controld_globals.node_pending_timeout); node_pending_timer = calloc(1, sizeof(struct abort_timer_s)); CRM_ASSERT(node_pending_timer != NULL); node_pending_timer->aborted = FALSE; node_pending_timer->priority = PCMK_SCORE_INFINITY; node_pending_timer->action = pcmk__graph_restart; node_pending_timer->text = "Node pending timed out"; key = strdup(node->uuid); CRM_ASSERT(key != NULL); g_hash_table_replace(node_pending_timers, key, node_pending_timer); node_pending_timer->id = g_timeout_add_seconds(timeout, node_pending_timer_popped, key); CRM_ASSERT(node_pending_timer->id != 0); } static void remove_node_pending_timer(const char *node_uuid) { if (node_pending_timers == NULL) { return; } g_hash_table_remove(node_pending_timers, node_uuid); } void controld_node_pending_timer(const crm_node_t *node) { long long remaining_timeout = 0; /* If the node is not an active cluster node, is leaving the cluster, or is * already part of CPG, or PCMK_OPT_NODE_PENDING_TIMEOUT is disabled, free * any node pending timer for it. 
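/*
 * Illustrative sketch (not taken from the patched sources) of the
 * re-armable one-shot GLib timer pattern that abort_after_delay() and
 * abort_timer_popped() above rely on: any pending source is removed before
 * a new one is scheduled, and the callback returns FALSE so GLib does not
 * reschedule it.  All example_* names are placeholders.
 */
#include <glib.h>

static guint example_timer_id = 0;

static gboolean
example_timer_popped(gpointer user_data)
{
    example_timer_id = 0;   /* timer has fired and is no longer scheduled */
    /* ... perform the delayed action here ... */
    return FALSE;           /* one-shot: do not reschedule */
}

static void
example_schedule(guint delay_ms)
{
    if (example_timer_id != 0) {
        g_source_remove(example_timer_id);   /* re-arm: drop pending timer */
    }
    example_timer_id = g_timeout_add(delay_ms, example_timer_popped, NULL);
}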
*/ if (pcmk_is_set(node->flags, crm_remote_node) || (node->when_member <= 1) || (node->when_online > 0) || (controld_globals.node_pending_timeout == 0)) { remove_node_pending_timer(node->uuid); return; } // Node is a cluster member but offline in CPG remaining_timeout = node->when_member - time(NULL) + controld_globals.node_pending_timeout; /* It already passed node pending timeout somehow. * Free any node pending timer of it. */ if (remaining_timeout <= 0) { remove_node_pending_timer(node->uuid); return; } init_node_pending_timer(node, remaining_timeout); } void controld_free_node_pending_timers(void) { if (node_pending_timers == NULL) { return; } g_hash_table_destroy(node_pending_timers); node_pending_timers = NULL; } static const char * abort2text(enum pcmk__graph_next abort_action) { switch (abort_action) { case pcmk__graph_done: return "done"; case pcmk__graph_wait: return "stop"; case pcmk__graph_restart: return "restart"; case pcmk__graph_shutdown: return "shutdown"; } return "unknown"; } static bool update_abort_priority(pcmk__graph_t *graph, int priority, enum pcmk__graph_next action, const char *abort_reason) { bool change = FALSE; if (graph == NULL) { return change; } if (graph->abort_priority < priority) { crm_debug("Abort priority upgraded from %d to %d", graph->abort_priority, priority); graph->abort_priority = priority; if (graph->abort_reason != NULL) { crm_debug("'%s' abort superseded by %s", graph->abort_reason, abort_reason); } graph->abort_reason = abort_reason; change = TRUE; } if (graph->completion_action < action) { crm_debug("Abort action %s superseded by %s: %s", abort2text(graph->completion_action), abort2text(action), abort_reason); graph->completion_action = action; change = TRUE; } return change; } void abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action, const char *abort_text, const xmlNode *reason, const char *fn, int line) { int add[] = { 0, 0, 0 }; int del[] = { 0, 0, 0 }; int level = LOG_INFO; const xmlNode *diff = NULL; const xmlNode *change = NULL; CRM_CHECK(controld_globals.transition_graph != NULL, return); switch (controld_globals.fsa_state) { case S_STARTING: case S_PENDING: case S_NOT_DC: case S_HALT: case S_ILLEGAL: case S_STOPPING: case S_TERMINATE: crm_info("Abort %s suppressed: state=%s (%scomplete)", abort_text, fsa_state2string(controld_globals.fsa_state), (controld_globals.transition_graph->complete? 
"" : "in")); return; default: break; } abort_timer.aborted = TRUE; controld_expect_sched_reply(NULL); if (!controld_globals.transition_graph->complete && update_abort_priority(controld_globals.transition_graph, abort_priority, abort_action, abort_text)) { level = LOG_NOTICE; } if (reason != NULL) { const xmlNode *search = NULL; for(search = reason; search; search = search->parent) { if (pcmk__xe_is(search, PCMK_XE_DIFF)) { diff = search; break; } } if(diff) { xml_patch_versions(diff, add, del); for(search = reason; search; search = search->parent) { if (pcmk__xe_is(search, PCMK_XE_CHANGE)) { change = search; break; } } } } if (reason == NULL) { do_crm_log(level, "Transition %d aborted: %s " CRM_XS " source=%s:%d " "complete=%s", controld_globals.transition_graph->id, abort_text, fn, line, pcmk__btoa(controld_globals.transition_graph->complete)); } else if(change == NULL) { GString *local_path = pcmk__element_xpath(reason); CRM_ASSERT(local_path != NULL); do_crm_log(level, "Transition %d aborted by %s.%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, reason->name, pcmk__xe_id(reason), abort_text, add[0], add[1], add[2], fn, line, (const char *) local_path->str, pcmk__btoa(controld_globals.transition_graph->complete)); g_string_free(local_path, TRUE); } else { const char *op = crm_element_value(change, PCMK_XA_OPERATION); const char *path = crm_element_value(change, PCMK_XA_PATH); if(change == reason) { if (strcmp(op, PCMK_VALUE_CREATE) == 0) { reason = reason->children; } else if (strcmp(op, PCMK_VALUE_MODIFY) == 0) { reason = first_named_child(reason, PCMK_XE_CHANGE_RESULT); if(reason) { reason = reason->children; } } CRM_CHECK(reason != NULL, goto done); } if (strcmp(op, PCMK_VALUE_DELETE) == 0) { const char *shortpath = strrchr(path, '/'); do_crm_log(level, "Transition %d aborted by deletion of %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, (shortpath? 
(shortpath + 1) : path), abort_text, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); } else if (pcmk__xe_is(reason, PCMK_XE_NVPAIR)) { do_crm_log(level, "Transition %d aborted by %s doing %s %s=%s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, crm_element_value(reason, PCMK_XA_ID), op, crm_element_value(reason, PCMK_XA_NAME), crm_element_value(reason, PCMK_XA_VALUE), abort_text, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); } else if (pcmk__xe_is(reason, PCMK__XE_LRM_RSC_OP)) { const char *magic = crm_element_value(reason, PCMK__XA_TRANSITION_MAGIC); do_crm_log(level, "Transition %d aborted by operation %s '%s' on %s: %s " CRM_XS " magic=%s cib=%d.%d.%d source=%s:%d complete=%s", controld_globals.transition_graph->id, crm_element_value(reason, PCMK__XA_OPERATION_KEY), op, crm_element_value(reason, PCMK__META_ON_NODE), abort_text, magic, add[0], add[1], add[2], fn, line, pcmk__btoa(controld_globals.transition_graph->complete)); } else if (pcmk__str_any_of((const char *) reason->name, PCMK__XE_NODE_STATE, PCMK_XE_NODE, NULL)) { const char *uname = crm_peer_uname(pcmk__xe_id(reason)); do_crm_log(level, "Transition %d aborted by %s '%s' on %s: %s " CRM_XS " cib=%d.%d.%d source=%s:%d complete=%s", controld_globals.transition_graph->id, reason->name, op, pcmk__s(uname, pcmk__xe_id(reason)), abort_text, add[0], add[1], add[2], fn, line, pcmk__btoa(controld_globals.transition_graph->complete)); } else { const char *id = pcmk__xe_id(reason); do_crm_log(level, "Transition %d aborted by %s.%s '%s': %s " CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s", controld_globals.transition_graph->id, reason->name, pcmk__s(id, ""), pcmk__s(op, "change"), abort_text, add[0], add[1], add[2], fn, line, path, pcmk__btoa(controld_globals.transition_graph->complete)); } } done: if (controld_globals.transition_graph->complete) { if (controld_get_period_transition_timer() > 0) { controld_stop_transition_timer(); controld_start_transition_timer(); } else { register_fsa_input(C_FSA_INTERNAL, I_PE_CALC, NULL); } return; } trigger_graph(); } diff --git a/lib/cluster/cluster.c b/lib/cluster/cluster.c index aefc8c1634..33aa1731de 100644 --- a/lib/cluster/cluster.c +++ b/lib/cluster/cluster.c @@ -1,407 +1,407 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "crmcluster_private.h" CRM_TRACE_INIT_DATA(cluster); /*! * \brief Get (and set if needed) a node's UUID * * \param[in,out] peer Node to check * * \return Node UUID of \p peer, or NULL if unknown */ const char * crm_peer_uuid(crm_node_t *peer) { char *uuid = NULL; // Check simple cases first, to avoid any calls that might block if (peer == NULL) { return NULL; } if (peer->uuid != NULL) { return peer->uuid; } switch (get_cluster_type()) { case pcmk_cluster_corosync: #if SUPPORT_COROSYNC uuid = pcmk__corosync_uuid(peer); #endif break; case pcmk_cluster_unknown: case pcmk_cluster_invalid: crm_err("Unsupported cluster type"); break; } peer->uuid = uuid; return peer->uuid; } /*! 
* \brief Connect to the cluster layer * * \param[in,out] Initialized cluster object to connect * * \return TRUE on success, otherwise FALSE */ gboolean crm_cluster_connect(crm_cluster_t *cluster) { enum cluster_type_e type = get_cluster_type(); crm_notice("Connecting to %s cluster infrastructure", name_for_cluster_type(type)); switch (type) { case pcmk_cluster_corosync: #if SUPPORT_COROSYNC crm_peer_init(); return pcmk__corosync_connect(cluster); #else break; #endif // SUPPORT_COROSYNC default: break; } return FALSE; } /*! * \brief Disconnect from the cluster layer * * \param[in,out] cluster Cluster object to disconnect */ void crm_cluster_disconnect(crm_cluster_t *cluster) { enum cluster_type_e type = get_cluster_type(); crm_info("Disconnecting from %s cluster infrastructure", name_for_cluster_type(type)); switch (type) { case pcmk_cluster_corosync: #if SUPPORT_COROSYNC crm_peer_destroy(); pcmk__corosync_disconnect(cluster); #endif // SUPPORT_COROSYNC break; default: break; } } /*! * \brief Allocate a new \p crm_cluster_t object * * \return A newly allocated \p crm_cluster_t object (guaranteed not \p NULL) * \note The caller is responsible for freeing the return value using * \p pcmk_cluster_free(). */ crm_cluster_t * pcmk_cluster_new(void) { crm_cluster_t *cluster = calloc(1, sizeof(crm_cluster_t)); CRM_ASSERT(cluster != NULL); return cluster; } /*! * \brief Free a \p crm_cluster_t object and its dynamically allocated members * * \param[in,out] cluster Cluster object to free */ void pcmk_cluster_free(crm_cluster_t *cluster) { if (cluster == NULL) { return; } free(cluster->uuid); free(cluster->uname); free(cluster); } /*! * \brief Send an XML message via the cluster messaging layer * * \param[in] node Cluster node to send message to * \param[in] service Message type to use in message host info * \param[in] data XML message to send * \param[in] ordered Ignored for currently supported messaging layers * * \return TRUE on success, otherwise FALSE */ gboolean send_cluster_message(const crm_node_t *node, enum crm_ais_msg_types service, const xmlNode *data, gboolean ordered) { switch (get_cluster_type()) { case pcmk_cluster_corosync: #if SUPPORT_COROSYNC return pcmk__cpg_send_xml(data, node, service); #endif break; default: break; } return FALSE; } /*! * \brief Get the local node's name * * \return Local node's name * \note This will fatally exit if local node name cannot be known. */ const char * get_local_node_name(void) { static char *name = NULL; if (name == NULL) { name = get_node_name(0); } return name; } /*! * \brief Get the node name corresponding to a cluster node ID * * \param[in] nodeid Node ID to check (or 0 for local node) * * \return Node name corresponding to \p nodeid * \note This will fatally exit if \p nodeid is 0 and local node name cannot be * known. 
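/*
 * Minimal caller sketch (assumptions: the public API as declared above and
 * in <crm/cluster.h>; error handling reduced to the bare minimum) showing
 * the connect/disconnect lifecycle of the functions defined in this file.
 */
#include <crm/cluster.h>

static int
example_cluster_session(void)
{
    crm_cluster_t *cluster = pcmk_cluster_new();    /* never returns NULL */

    if (!crm_cluster_connect(cluster)) {            /* FALSE on failure */
        pcmk_cluster_free(cluster);
        return -1;
    }

    /* ... exchange messages, e.g. via send_cluster_message() ... */

    crm_cluster_disconnect(cluster);
    pcmk_cluster_free(cluster);
    return 0;
}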
*/ char * get_node_name(uint32_t nodeid) { char *name = NULL; enum cluster_type_e stack = get_cluster_type(); switch (stack) { case pcmk_cluster_corosync: #if SUPPORT_COROSYNC name = pcmk__corosync_name(0, nodeid); break; #endif // SUPPORT_COROSYNC default: crm_err("Unknown cluster type: %s (%d)", name_for_cluster_type(stack), stack); } if ((name == NULL) && (nodeid == 0)) { name = pcmk_hostname(); if (name == NULL) { // @TODO Maybe let the caller decide what to do crm_err("Could not obtain the local %s node name", name_for_cluster_type(stack)); crm_exit(CRM_EX_FATAL); } crm_notice("Defaulting to uname -n for the local %s node name", name_for_cluster_type(stack)); } if (name == NULL) { - crm_notice("Could not obtain a node name for %s node with id %u", - name_for_cluster_type(stack), nodeid); + crm_notice("Could not obtain a node name for %s node with " + PCMK_XA_ID " %u", name_for_cluster_type(stack), nodeid); } return name; } /*! * \brief Get the node name corresponding to a node UUID * * \param[in] uuid UUID of desired node * * \return name of desired node * * \note This relies on the remote peer cache being populated with all * remote nodes in the cluster, so callers should maintain that cache. */ const char * crm_peer_uname(const char *uuid) { GHashTableIter iter; crm_node_t *node = NULL; CRM_CHECK(uuid != NULL, return NULL); /* remote nodes have the same uname and uuid */ if (g_hash_table_lookup(crm_remote_peer_cache, uuid)) { return uuid; } /* avoid blocking calls where possible */ g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if (pcmk__str_eq(node->uuid, uuid, pcmk__str_casei)) { if (node->uname != NULL) { return node->uname; } break; } } node = NULL; if (is_corosync_cluster()) { long long id; if ((pcmk__scan_ll(uuid, &id, 0LL) != pcmk_rc_ok) || (id < 1LL) || (id > UINT32_MAX)) { crm_err("Invalid Corosync node ID '%s'", uuid); return NULL; } node = pcmk__search_node_caches((uint32_t) id, NULL, pcmk__node_search_cluster); if (node != NULL) { crm_info("Setting uuid for node %s[%u] to %s", node->uname, node->id, uuid); node->uuid = strdup(uuid); return node->uname; } return NULL; } return NULL; } /*! * \brief Get a log-friendly string equivalent of a cluster type * * \param[in] type Cluster type * * \return Log-friendly string corresponding to \p type */ const char * name_for_cluster_type(enum cluster_type_e type) { switch (type) { case pcmk_cluster_corosync: return "corosync"; case pcmk_cluster_unknown: return "unknown"; case pcmk_cluster_invalid: return "invalid"; } crm_err("Invalid cluster type: %d", type); return "invalid"; } /*! * \brief Get (and validate) the local cluster type * * \return Local cluster type * \note This will fatally exit if the local cluster type is invalid. */ enum cluster_type_e get_cluster_type(void) { bool detected = false; const char *cluster = NULL; static enum cluster_type_e cluster_type = pcmk_cluster_unknown; /* Return the previous calculation, if any */ if (cluster_type != pcmk_cluster_unknown) { return cluster_type; } cluster = pcmk__env_option(PCMK__ENV_CLUSTER_TYPE); #if SUPPORT_COROSYNC /* If nothing is defined in the environment, try corosync (if supported) */ if (cluster == NULL) { crm_debug("Testing with Corosync"); cluster_type = pcmk__corosync_detect(); if (cluster_type != pcmk_cluster_unknown) { detected = true; goto done; } } #endif /* Something was defined in the environment, test it against what we support */ crm_info("Verifying cluster type: '%s'", ((cluster == NULL)? 
"-unspecified-" : cluster)); if (cluster == NULL) { #if SUPPORT_COROSYNC } else if (pcmk__str_eq(cluster, "corosync", pcmk__str_casei)) { cluster_type = pcmk_cluster_corosync; #endif } else { cluster_type = pcmk_cluster_invalid; goto done; /* Keep the compiler happy when no stacks are supported */ } done: if (cluster_type == pcmk_cluster_unknown) { crm_notice("Could not determine the current cluster type"); } else if (cluster_type == pcmk_cluster_invalid) { crm_notice("This installation does not support the '%s' cluster infrastructure: terminating.", cluster); crm_exit(CRM_EX_FATAL); } else { crm_info("%s an active '%s' cluster", (detected? "Detected" : "Assuming"), name_for_cluster_type(cluster_type)); } return cluster_type; } /*! * \brief Check whether the local cluster is a Corosync cluster * * \return TRUE if the local cluster is a Corosync cluster, otherwise FALSE */ gboolean is_corosync_cluster(void) { return get_cluster_type() == pcmk_cluster_corosync; } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include void set_uuid(xmlNode *xml, const char *attr, crm_node_t *node) { crm_xml_add(xml, attr, crm_peer_uuid(node)); } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c index 329dfac3c1..0fc93d499a 100644 --- a/lib/cluster/membership.c +++ b/lib/cluster/membership.c @@ -1,1385 +1,1390 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include #include #include #include #include #include #include #include #include #include #include #include "crmcluster_private.h" /* The peer cache remembers cluster nodes that have been seen. * This is managed mostly automatically by libcluster, based on * cluster membership events. * * Because cluster nodes can have conflicting names or UUIDs, * the hash table key is a uniquely generated ID. */ GHashTable *crm_peer_cache = NULL; /* * The remote peer cache tracks pacemaker_remote nodes. While the * value has the same type as the peer cache's, it is tracked separately for * three reasons: pacemaker_remote nodes can't have conflicting names or UUIDs, * so the name (which is also the UUID) is used as the hash table key; there * is no equivalent of membership events, so management is not automatic; and * most users of the peer cache need to exclude pacemaker_remote nodes. * * That said, using a single cache would be more logical and less error-prone, * so it would be a good idea to merge them one day. * * libcluster provides two avenues for populating the cache: * crm_remote_peer_get() and crm_remote_peer_cache_remove() directly manage it, * while crm_remote_peer_cache_refresh() populates it via the CIB. */ GHashTable *crm_remote_peer_cache = NULL; /* * The known node cache tracks cluster and remote nodes that have been seen in * the CIB. It is useful mainly when a caller needs to know about a node that * may no longer be in the membership, but doesn't want to add the node to the * main peer cache tables. 
*/ static GHashTable *known_node_cache = NULL; unsigned long long crm_peer_seq = 0; gboolean crm_have_quorum = FALSE; static gboolean crm_autoreap = TRUE; // Flag setting and clearing for crm_node_t:flags #define set_peer_flags(peer, flags_to_set) do { \ (peer)->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Peer", (peer)->uname, \ (peer)->flags, (flags_to_set), \ #flags_to_set); \ } while (0) #define clear_peer_flags(peer, flags_to_clear) do { \ (peer)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Peer", (peer)->uname, \ (peer)->flags, (flags_to_clear), \ #flags_to_clear); \ } while (0) static void update_peer_uname(crm_node_t *node, const char *uname); static crm_node_t *find_known_node(const char *id, const char *uname); int crm_remote_peer_cache_size(void) { if (crm_remote_peer_cache == NULL) { return 0; } return g_hash_table_size(crm_remote_peer_cache); } /*! * \brief Get a remote node peer cache entry, creating it if necessary * * \param[in] node_name Name of remote node * * \return Cache entry for node on success, NULL (and set errno) otherwise * * \note When creating a new entry, this will leave the node state undetermined, * so the caller should also call pcmk__update_peer_state() if the state * is known. * \note Because this can add and remove cache entries, callers should not * assume any previously obtained cache entry pointers remain valid. */ crm_node_t * crm_remote_peer_get(const char *node_name) { crm_node_t *node; char *node_name_copy = NULL; if (node_name == NULL) { errno = EINVAL; return NULL; } /* It's theoretically possible that the node was added to the cluster peer * cache before it was known to be a Pacemaker Remote node. Remove that * entry unless it has a node ID, which means the name actually is * associated with a cluster node. (@TODO return an error in that case?) */ node = pcmk__search_node_caches(0, node_name, pcmk__node_search_cluster); if ((node != NULL) && (node->uuid == NULL)) { /* node_name could be a pointer into the cache entry being removed, so * reassign it to a copy before the original gets freed */ node_name_copy = strdup(node_name); if (node_name_copy == NULL) { errno = ENOMEM; return NULL; } node_name = node_name_copy; reap_crm_member(0, node_name); } /* Return existing cache entry if one exists */ node = g_hash_table_lookup(crm_remote_peer_cache, node_name); if (node) { free(node_name_copy); return node; } /* Allocate a new entry */ node = calloc(1, sizeof(crm_node_t)); if (node == NULL) { free(node_name_copy); return NULL; } /* Populate the essential information */ set_peer_flags(node, crm_remote_node); node->uuid = strdup(node_name); if (node->uuid == NULL) { free(node); errno = ENOMEM; free(node_name_copy); return NULL; } /* Add the new entry to the cache */ g_hash_table_replace(crm_remote_peer_cache, node->uuid, node); crm_trace("added %s to remote cache", node_name); /* Update the entry's uname, ensuring peer status callbacks are called */ update_peer_uname(node, node_name); free(node_name_copy); return node; } /*! * \brief Remove a node from the Pacemaker Remote node cache * * \param[in] node_name Name of node to remove from cache * * \note The caller must be careful not to use \p node_name after calling this * function if it might be a pointer into the cache entry being removed. */ void crm_remote_peer_cache_remove(const char *node_name) { /* Do a lookup first, because node_name could be a pointer within the entry * being removed -- we can't log it *after* removing it. 
*/ if (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL) { crm_trace("Removing %s from Pacemaker Remote node cache", node_name); g_hash_table_remove(crm_remote_peer_cache, node_name); } } /*! * \internal * \brief Return node status based on a CIB status entry * * \param[in] node_state XML of node state * * \return \c CRM_NODE_LOST if \c PCMK__XA_IN_CCM is false in * \c PCMK__XE_NODE_STATE, \c CRM_NODE_MEMBER otherwise * \note Unlike most boolean XML attributes, this one defaults to true, for * backward compatibility with older controllers that don't set it. */ static const char * remote_state_from_cib(const xmlNode *node_state) { bool status = false; if ((pcmk__xe_get_bool_attr(node_state, PCMK__XA_IN_CCM, &status) == pcmk_rc_ok) && !status) { return CRM_NODE_LOST; } else { return CRM_NODE_MEMBER; } } /* user data for looping through remote node xpath searches */ struct refresh_data { const char *field; /* XML attribute to check for node name */ gboolean has_state; /* whether to update node state based on XML */ }; /*! * \internal * \brief Process one pacemaker_remote node xpath search result * * \param[in] result XML search result * \param[in] user_data what to look for in the XML */ static void remote_cache_refresh_helper(xmlNode *result, void *user_data) { const struct refresh_data *data = user_data; const char *remote = crm_element_value(result, data->field); const char *state = NULL; crm_node_t *node; CRM_CHECK(remote != NULL, return); /* Determine node's state, if the result has it */ if (data->has_state) { state = remote_state_from_cib(result); } /* Check whether cache already has entry for node */ node = g_hash_table_lookup(crm_remote_peer_cache, remote); if (node == NULL) { /* Node is not in cache, so add a new entry for it */ node = crm_remote_peer_get(remote); CRM_ASSERT(node); if (state) { pcmk__update_peer_state(__func__, node, state, 0); } } else if (pcmk_is_set(node->flags, crm_node_dirty)) { /* Node is in cache and hasn't been updated already, so mark it clean */ clear_peer_flags(node, crm_node_dirty); if (state) { pcmk__update_peer_state(__func__, node, state, 0); } } } static void mark_dirty(gpointer key, gpointer value, gpointer user_data) { set_peer_flags((crm_node_t *) value, crm_node_dirty); } static gboolean is_dirty(gpointer key, gpointer value, gpointer user_data) { return pcmk_is_set(((crm_node_t*)value)->flags, crm_node_dirty); } /*! * \brief Repopulate the remote peer cache based on CIB XML * * \param[in] xmlNode CIB XML to parse */ void crm_remote_peer_cache_refresh(xmlNode *cib) { struct refresh_data data; crm_peer_init(); /* First, we mark all existing cache entries as dirty, * so that later we can remove any that weren't in the CIB. * We don't empty the cache, because we need to detect changes in state. */ g_hash_table_foreach(crm_remote_peer_cache, mark_dirty, NULL); /* Look for guest nodes and remote nodes in the status section */ data.field = PCMK_XA_ID; data.has_state = TRUE; crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_STATUS, remote_cache_refresh_helper, &data); /* Look for guest nodes and remote nodes in the configuration section, * because they may have just been added and not have a status entry yet. * In that case, the cached node state will be left NULL, so that the * peer status callback isn't called until we're sure the node started * successfully. 
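/*
 * Generic GLib sketch of the mark-and-sweep refresh used by
 * crm_remote_peer_cache_refresh() above: flag every cached entry dirty, let
 * the refresh step clear the flag on entries still present, then drop
 * whatever stayed dirty.  The example_* type and names are illustrative.
 */
#include <glib.h>

struct example_entry {
    gboolean dirty;
};

static void
example_mark_dirty(gpointer key, gpointer value, gpointer user_data)
{
    ((struct example_entry *) value)->dirty = TRUE;
}

static gboolean
example_is_dirty(gpointer key, gpointer value, gpointer user_data)
{
    return ((struct example_entry *) value)->dirty;
}

static void
example_refresh(GHashTable *cache)
{
    g_hash_table_foreach(cache, example_mark_dirty, NULL);
    /* ... update or re-add entries seen in the new data, clearing dirty ... */
    g_hash_table_foreach_remove(cache, example_is_dirty, NULL);
}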
*/ data.field = PCMK_XA_VALUE; data.has_state = FALSE; crm_foreach_xpath_result(cib, PCMK__XP_GUEST_NODE_CONFIG, remote_cache_refresh_helper, &data); data.field = PCMK_XA_ID; data.has_state = FALSE; crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_CONFIG, remote_cache_refresh_helper, &data); /* Remove all old cache entries that weren't seen in the CIB */ g_hash_table_foreach_remove(crm_remote_peer_cache, is_dirty, NULL); } gboolean crm_is_peer_active(const crm_node_t * node) { if(node == NULL) { return FALSE; } if (pcmk_is_set(node->flags, crm_remote_node)) { /* remote nodes are never considered active members. This * guarantees they will never be considered for DC membership.*/ return FALSE; } #if SUPPORT_COROSYNC if (is_corosync_cluster()) { return crm_is_corosync_peer_active(node); } #endif crm_err("Unhandled cluster type: %s", name_for_cluster_type(get_cluster_type())); return FALSE; } static gboolean crm_reap_dead_member(gpointer key, gpointer value, gpointer user_data) { crm_node_t *node = value; crm_node_t *search = user_data; if (search == NULL) { return FALSE; } else if (search->id && node->id != search->id) { return FALSE; } else if (search->id == 0 && !pcmk__str_eq(node->uname, search->uname, pcmk__str_casei)) { return FALSE; } else if (crm_is_peer_active(value) == FALSE) { - crm_info("Removing node with name %s and id %u from membership cache", + crm_info("Removing node with name %s and " PCMK_XA_ID + " %u from membership cache", (node->uname? node->uname : "unknown"), node->id); return TRUE; } return FALSE; } /*! * \brief Remove all peer cache entries matching a node ID and/or uname * * \param[in] id ID of node to remove (or 0 to ignore) * \param[in] name Uname of node to remove (or NULL to ignore) * * \return Number of cache entries removed * * \note The caller must be careful not to use \p name after calling this * function if it might be a pointer into the cache entry being removed. */ guint reap_crm_member(uint32_t id, const char *name) { int matches = 0; crm_node_t search = { 0, }; if (crm_peer_cache == NULL) { crm_trace("Membership cache not initialized, ignoring purge request"); return 0; } search.id = id; pcmk__str_update(&search.uname, name); matches = g_hash_table_foreach_remove(crm_peer_cache, crm_reap_dead_member, &search); if(matches) { - crm_notice("Purged %d peer%s with id=%u%s%s from the membership cache", + crm_notice("Purged %d peer%s with " PCMK_XA_ID + "=%u%s%s from the membership cache", matches, pcmk__plural_s(matches), search.id, (search.uname? " and/or uname=" : ""), (search.uname? search.uname : "")); } else { - crm_info("No peers with id=%u%s%s to purge from the membership cache", + crm_info("No peers with " PCMK_XA_ID + "=%u%s%s to purge from the membership cache", search.id, (search.uname? " and/or uname=" : ""), (search.uname? 
search.uname : "")); } free(search.uname); return matches; } static void count_peer(gpointer key, gpointer value, gpointer user_data) { guint *count = user_data; crm_node_t *node = value; if (crm_is_peer_active(node)) { *count = *count + 1; } } guint crm_active_peers(void) { guint count = 0; if (crm_peer_cache) { g_hash_table_foreach(crm_peer_cache, count_peer, &count); } return count; } static void destroy_crm_node(gpointer data) { crm_node_t *node = data; crm_trace("Destroying entry for node %u: %s", node->id, node->uname); free(node->uname); free(node->state); free(node->uuid); free(node->expected); free(node->conn_host); free(node); } void crm_peer_init(void) { if (crm_peer_cache == NULL) { crm_peer_cache = pcmk__strikey_table(free, destroy_crm_node); } if (crm_remote_peer_cache == NULL) { crm_remote_peer_cache = pcmk__strikey_table(NULL, destroy_crm_node); } if (known_node_cache == NULL) { known_node_cache = pcmk__strikey_table(free, destroy_crm_node); } } void crm_peer_destroy(void) { if (crm_peer_cache != NULL) { crm_trace("Destroying peer cache with %d members", g_hash_table_size(crm_peer_cache)); g_hash_table_destroy(crm_peer_cache); crm_peer_cache = NULL; } if (crm_remote_peer_cache != NULL) { crm_trace("Destroying remote peer cache with %d members", g_hash_table_size(crm_remote_peer_cache)); g_hash_table_destroy(crm_remote_peer_cache); crm_remote_peer_cache = NULL; } if (known_node_cache != NULL) { crm_trace("Destroying known node cache with %d members", g_hash_table_size(known_node_cache)); g_hash_table_destroy(known_node_cache); known_node_cache = NULL; } } static void (*peer_status_callback)(enum crm_status_type, crm_node_t *, const void *) = NULL; /*! * \brief Set a client function that will be called after peer status changes * * \param[in] dispatch Pointer to function to use as callback * * \note Previously, client callbacks were responsible for peer cache * management. This is no longer the case, and client callbacks should do * only client-specific handling. Callbacks MUST NOT add or remove entries * in the peer caches. */ void crm_set_status_callback(void (*dispatch) (enum crm_status_type, crm_node_t *, const void *)) { peer_status_callback = dispatch; } /*! * \brief Tell the library whether to automatically reap lost nodes * * If TRUE (the default), calling crm_update_peer_proc() will also update the * peer state to CRM_NODE_MEMBER or CRM_NODE_LOST, and pcmk__update_peer_state() * will reap peers whose state changes to anything other than CRM_NODE_MEMBER. * Callers should leave this enabled unless they plan to manage the cache * separately on their own. * * \param[in] autoreap TRUE to enable automatic reaping, FALSE to disable */ void crm_set_autoreap(gboolean autoreap) { crm_autoreap = autoreap; } static void dump_peer_hash(int level, const char *caller) { GHashTableIter iter; const char *id = NULL; crm_node_t *node = NULL; g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, (gpointer *) &id, (gpointer *) &node)) { do_crm_log(level, "%s: Node %u/%s = %p - %s", caller, node->id, node->uname, node, id); } } static gboolean hash_find_by_data(gpointer key, gpointer value, gpointer user_data) { return value == user_data; } /*! 
* \internal * \brief Search caches for a node (cluster or Pacemaker Remote) * * \param[in] id If not 0, cluster node ID to search for * \param[in] uname If not NULL, node name to search for * \param[in] flags Group of enum pcmk__node_search_flags * * \return Node cache entry if found, otherwise NULL */ crm_node_t * pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags) { crm_node_t *node = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if ((uname != NULL) && pcmk_is_set(flags, pcmk__node_search_remote)) { node = g_hash_table_lookup(crm_remote_peer_cache, uname); } if ((node == NULL) && pcmk_is_set(flags, pcmk__node_search_cluster)) { node = pcmk__search_cluster_node_cache(id, uname, NULL); } if ((node == NULL) && pcmk_is_set(flags, pcmk__node_search_known)) { char *id_str = (id == 0)? NULL : crm_strdup_printf("%u", id); node = find_known_node(id_str, uname); free(id_str); } return node; } /*! * \internal * \brief Purge a node from cache (both cluster and Pacemaker Remote) * * \param[in] node_name If not NULL, purge only nodes with this name * \param[in] node_id If not 0, purge cluster nodes only if they have this ID * * \note If \p node_name is NULL and \p node_id is 0, no nodes will be purged. * If \p node_name is not NULL and \p node_id is not 0, Pacemaker Remote * nodes that match \p node_name will be purged, and cluster nodes that * match both \p node_name and \p node_id will be purged. * \note The caller must be careful not to use \p node_name after calling this * function if it might be a pointer into a cache entry being removed. */ void pcmk__purge_node_from_cache(const char *node_name, uint32_t node_id) { char *node_name_copy = NULL; if ((node_name == NULL) && (node_id == 0U)) { return; } // Purge from Pacemaker Remote node cache if ((node_name != NULL) && (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL)) { /* node_name could be a pointer into the cache entry being purged, * so reassign it to a copy before the original gets freed */ node_name_copy = strdup(node_name); CRM_ASSERT(node_name_copy != NULL); node_name = node_name_copy; crm_trace("Purging %s from Pacemaker Remote node cache", node_name); g_hash_table_remove(crm_remote_peer_cache, node_name); } reap_crm_member(node_id, node_name); free(node_name_copy); } /*! 
* \internal * \brief Search cluster node cache * * \param[in] id If not 0, cluster node ID to search for * \param[in] uname If not NULL, node name to search for * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster * node ID to search for * * \return Cluster node cache entry if found, otherwise NULL */ crm_node_t * pcmk__search_cluster_node_cache(unsigned int id, const char *uname, const char *uuid) { GHashTableIter iter; crm_node_t *node = NULL; crm_node_t *by_id = NULL; crm_node_t *by_name = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); if (uname != NULL) { g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if(node->uname && strcasecmp(node->uname, uname) == 0) { crm_trace("Name match: %s = %p", node->uname, node); by_name = node; break; } } } if (id > 0) { g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if(node->id == id) { crm_trace("ID match: %u = %p", node->id, node); by_id = node; break; } } } else if (uuid != NULL) { g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if (pcmk__str_eq(node->uuid, uuid, pcmk__str_casei)) { crm_trace("UUID match: %s = %p", node->uuid, node); by_id = node; break; } } } node = by_id; /* Good default */ if(by_id == by_name) { /* Nothing to do if they match (both NULL counts) */ crm_trace("Consistent: %p for %u/%s", by_id, id, uname); } else if(by_id == NULL && by_name) { crm_trace("Only one: %p for %u/%s", by_name, id, uname); if(id && by_name->id) { dump_peer_hash(LOG_WARNING, __func__); crm_crit("Node %u and %u share the same name '%s'", id, by_name->id, uname); node = NULL; /* Create a new one */ } else { node = by_name; } } else if(by_name == NULL && by_id) { crm_trace("Only one: %p for %u/%s", by_id, id, uname); if(uname && by_id->uname) { dump_peer_hash(LOG_WARNING, __func__); crm_crit("Node '%s' and '%s' share the same cluster nodeid %u: assuming '%s' is correct", uname, by_id->uname, id, uname); } } else if(uname && by_id->uname) { if(pcmk__str_eq(uname, by_id->uname, pcmk__str_casei)) { crm_notice("Node '%s' has changed its ID from %u to %u", by_id->uname, by_name->id, by_id->id); g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name); } else { crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname); dump_peer_hash(LOG_INFO, __func__); crm_abort(__FILE__, __func__, __LINE__, "member weirdness", TRUE, TRUE); } } else if(id && by_name->id) { crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname); } else { /* Simple merge */ /* Only corosync-based clusters use node IDs. The functions that call * pcmk__update_peer_state() and crm_update_peer_proc() only know * nodeid, so 'by_id' is authoritative when merging. 
*/ dump_peer_hash(LOG_DEBUG, __func__); crm_info("Merging %p into %p", by_name, by_id); g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name); } return node; } #if SUPPORT_COROSYNC static guint remove_conflicting_peer(crm_node_t *node) { int matches = 0; GHashTableIter iter; crm_node_t *existing_node = NULL; if (node->id == 0 || node->uname == NULL) { return 0; } if (!pcmk__corosync_has_nodelist()) { return 0; } g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &existing_node)) { if (existing_node->id > 0 && existing_node->id != node->id && existing_node->uname != NULL && strcasecmp(existing_node->uname, node->uname) == 0) { if (crm_is_peer_active(existing_node)) { continue; } crm_warn("Removing cached offline node %u/%s which has conflicting uname with %u", existing_node->id, existing_node->uname, node->id); g_hash_table_iter_remove(&iter); matches++; } } return matches; } #endif /*! * \brief Get a cluster node cache entry * * \param[in] id If not 0, cluster node ID to search for * \param[in] uname If not NULL, node name to search for * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster * node ID to search for * \param[in] flags Group of enum pcmk__node_search_flags * * \return (Possibly newly created) cluster node cache entry */ /* coverity[-alloc] Memory is referenced in one or both hashtables */ crm_node_t * pcmk__get_node(unsigned int id, const char *uname, const char *uuid, uint32_t flags) { crm_node_t *node = NULL; char *uname_lookup = NULL; CRM_ASSERT(id > 0 || uname != NULL); crm_peer_init(); // Check the Pacemaker Remote node cache first if (pcmk_is_set(flags, pcmk__node_search_remote)) { node = g_hash_table_lookup(crm_remote_peer_cache, uname); if (node != NULL) { return node; } } if (!pcmk_is_set(flags, pcmk__node_search_cluster)) { return NULL; } node = pcmk__search_cluster_node_cache(id, uname, uuid); /* if uname wasn't provided, and find_peer did not turn up a uname based on id. * we need to do a lookup of the node name using the id in the cluster membership. */ if ((node == NULL || node->uname == NULL) && (uname == NULL)) { uname_lookup = get_node_name(id); } if (uname_lookup) { uname = uname_lookup; crm_trace("Inferred a name of '%s' for node %u", uname, id); /* try to turn up the node one more time now that we know the uname. */ if (node == NULL) { node = pcmk__search_cluster_node_cache(id, uname, uuid); } } if (node == NULL) { char *uniqueid = crm_generate_uuid(); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node); crm_info("Created entry %s/%p for node %s/%u (%d total)", uniqueid, node, uname, id, 1 + g_hash_table_size(crm_peer_cache)); g_hash_table_replace(crm_peer_cache, uniqueid, node); } if(id > 0 && uname && (node->id == 0 || node->uname == NULL)) { crm_info("Node %u is now known as %s", id, uname); } if(id > 0 && node->id == 0) { node->id = id; } if (uname && (node->uname == NULL)) { update_peer_uname(node, uname); } if(node->uuid == NULL) { if (uuid == NULL) { uuid = crm_peer_uuid(node); } if (uuid) { crm_info("Node %u has uuid %s", id, uuid); } else { crm_info("Cannot obtain a UUID for node %u/%s", id, node->uname); } } free(uname_lookup); return node; } /*! * \internal * \brief Update a node's uname * * \param[in,out] node Node object to update * \param[in] uname New name to set * * \note This function should not be called within a peer cache iteration, * because in some cases it can remove conflicting cache entries, * which would invalidate the iterator. 
*/ static void update_peer_uname(crm_node_t *node, const char *uname) { CRM_CHECK(uname != NULL, crm_err("Bug: can't update node name without name"); return); CRM_CHECK(node != NULL, crm_err("Bug: can't update node name to %s without node", uname); return); if (pcmk__str_eq(uname, node->uname, pcmk__str_casei)) { crm_debug("Node uname '%s' did not change", uname); return; } for (const char *c = uname; *c; ++c) { if ((*c >= 'A') && (*c <= 'Z')) { crm_warn("Node names with capitals are discouraged, consider changing '%s'", uname); break; } } pcmk__str_update(&node->uname, uname); if (peer_status_callback != NULL) { peer_status_callback(crm_status_uname, node, NULL); } #if SUPPORT_COROSYNC if (is_corosync_cluster() && !pcmk_is_set(node->flags, crm_remote_node)) { remove_conflicting_peer(node); } #endif } /*! * \internal * \brief Get log-friendly string equivalent of a process flag * * \param[in] proc Process flag * * \return Log-friendly string equivalent of \p proc */ static inline const char * proc2text(enum crm_proc_flag proc) { const char *text = "unknown"; switch (proc) { case crm_proc_none: text = "none"; break; case crm_proc_based: text = "pacemaker-based"; break; case crm_proc_controld: text = "pacemaker-controld"; break; case crm_proc_schedulerd: text = "pacemaker-schedulerd"; break; case crm_proc_execd: text = "pacemaker-execd"; break; case crm_proc_attrd: text = "pacemaker-attrd"; break; case crm_proc_fenced: text = "pacemaker-fenced"; break; case crm_proc_cpg: text = "corosync-cpg"; break; } return text; } /*! * \internal * \brief Update a node's process information (and potentially state) * * \param[in] source Caller's function name (for log messages) * \param[in,out] node Node object to update * \param[in] flag Bitmask of new process information * \param[in] status node status (online, offline, etc.) * * \return NULL if any node was reaped from peer caches, value of node otherwise * * \note If this function returns NULL, the supplied node object was likely * freed and should not be used again. This function should not be * called within a cache iteration if reaping is possible, otherwise * reaping could invalidate the iterator. 
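/*
 * Caller sketch for the reaping contract documented here for
 * crm_update_peer_proc() (and likewise for pcmk__update_peer_state() below):
 * the return value replaces the caller's pointer, and NULL means the cache
 * entry was reaped and freed, so it must not be dereferenced again.
 */
static void
example_mark_lost(crm_node_t *node)
{
    node = pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
    if (node == NULL) {
        return;     /* cache entry was reaped and freed */
    }
    crm_trace("Node %s is lost but still cached", node->uname);
}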
*/ crm_node_t * crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const char *status) { uint32_t last = 0; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set %s to %s for NULL", source, proc2text(flag), status); return NULL); /* Pacemaker doesn't spawn processes on remote nodes */ if (pcmk_is_set(node->flags, crm_remote_node)) { return node; } last = node->processes; if (status == NULL) { node->processes = flag; if (node->processes != last) { changed = TRUE; } } else if (pcmk__str_eq(status, PCMK_VALUE_ONLINE, pcmk__str_casei)) { if ((node->processes & flag) != flag) { node->processes = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Peer process", node->uname, node->processes, flag, "processes"); changed = TRUE; } } else if (node->processes & flag) { node->processes = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Peer process", node->uname, node->processes, flag, "processes"); changed = TRUE; } if (changed) { if (status == NULL && flag <= crm_proc_none) { crm_info("%s: Node %s[%u] - all processes are now offline", source, node->uname, node->id); } else { crm_info("%s: Node %s[%u] - %s is now %s", source, node->uname, node->id, proc2text(flag), status); } if (pcmk_is_set(node->processes, crm_get_cluster_proc())) { node->when_online = time(NULL); } else { node->when_online = 0; } /* Call the client callback first, then update the peer state, * in case the node will be reaped */ if (peer_status_callback != NULL) { peer_status_callback(crm_status_processes, node, &last); } /* The client callback shouldn't touch the peer caches, * but as a safety net, bail if the peer cache was destroyed. */ if (crm_peer_cache == NULL) { return NULL; } if (crm_autoreap) { const char *peer_state = NULL; if (pcmk_is_set(node->processes, crm_get_cluster_proc())) { peer_state = CRM_NODE_MEMBER; } else { peer_state = CRM_NODE_LOST; } node = pcmk__update_peer_state(__func__, node, peer_state, 0); } } else { crm_trace("%s: Node %s[%u] - %s is unchanged (%s)", source, node->uname, node->id, proc2text(flag), status); } return node; } /*! * \internal * \brief Update a cluster node cache entry's expected join state * * \param[in] source Caller's function name (for logging) * \param[in,out] node Node to update * \param[in] expected Node's new join state */ void pcmk__update_peer_expected(const char *source, crm_node_t *node, const char *expected) { char *last = NULL; gboolean changed = FALSE; CRM_CHECK(node != NULL, crm_err("%s: Could not set 'expected' to %s", source, expected); return); /* Remote nodes don't participate in joins */ if (pcmk_is_set(node->flags, crm_remote_node)) { return; } last = node->expected; if (expected != NULL && !pcmk__str_eq(node->expected, expected, pcmk__str_casei)) { node->expected = strdup(expected); changed = TRUE; } if (changed) { crm_info("%s: Node %s[%u] - expected state is now %s (was %s)", source, node->uname, node->id, expected, last); free(last); } else { crm_trace("%s: Node %s[%u] - expected state is unchanged (%s)", source, node->uname, node->id, expected); } } /*! 
* \internal * \brief Update a node's state and membership information * * \param[in] source Caller's function name (for log messages) * \param[in,out] node Node object to update * \param[in] state Node's new state * \param[in] membership Node's new membership ID * \param[in,out] iter If not NULL, pointer to node's peer cache iterator * * \return NULL if any node was reaped, value of node otherwise * * \note If this function returns NULL, the supplied node object was likely * freed and should not be used again. This function may be called from * within a peer cache iteration if the iterator is supplied. */ static crm_node_t * update_peer_state_iter(const char *source, crm_node_t *node, const char *state, uint64_t membership, GHashTableIter *iter) { gboolean is_member; CRM_CHECK(node != NULL, crm_err("Could not set state for unknown host to %s" CRM_XS " source=%s", state, source); return NULL); is_member = pcmk__str_eq(state, CRM_NODE_MEMBER, pcmk__str_casei); if (is_member) { node->when_lost = 0; if (membership) { node->last_seen = membership; } } if (state && !pcmk__str_eq(node->state, state, pcmk__str_casei)) { char *last = node->state; if (is_member) { node->when_member = time(NULL); } else { node->when_member = 0; } node->state = strdup(state); crm_notice("Node %s state is now %s " CRM_XS " nodeid=%u previous=%s source=%s", node->uname, state, node->id, (last? last : "unknown"), source); if (peer_status_callback != NULL) { peer_status_callback(crm_status_nstate, node, last); } free(last); if (crm_autoreap && !is_member && !pcmk_is_set(node->flags, crm_remote_node)) { /* We only autoreap from the peer cache, not the remote peer cache, * because the latter should be managed only by * crm_remote_peer_cache_refresh(). */ if(iter) { - crm_notice("Purged 1 peer with id=%u and/or uname=%s from the membership cache", node->id, node->uname); + crm_notice("Purged 1 peer with " PCMK_XA_ID + "=%u and/or uname=%s from the membership cache", + node->id, node->uname); g_hash_table_iter_remove(iter); } else { reap_crm_member(node->id, node->uname); } node = NULL; } } else { crm_trace("Node %s state is unchanged (%s) " CRM_XS " nodeid=%u source=%s", node->uname, state, node->id, source); } return node; } /*! * \brief Update a node's state and membership information * * \param[in] source Caller's function name (for log messages) * \param[in,out] node Node object to update * \param[in] state Node's new state * \param[in] membership Node's new membership ID * * \return NULL if any node was reaped, value of node otherwise * * \note If this function returns NULL, the supplied node object was likely * freed and should not be used again. This function should not be * called within a cache iteration if reaping is possible, * otherwise reaping could invalidate the iterator. */ crm_node_t * pcmk__update_peer_state(const char *source, crm_node_t *node, const char *state, uint64_t membership) { return update_peer_state_iter(source, node, state, membership, NULL); } /*! 
* \internal * \brief Reap all nodes from cache whose membership information does not match * * \param[in] membership Membership ID of nodes to keep */ void pcmk__reap_unseen_nodes(uint64_t membership) { GHashTableIter iter; crm_node_t *node = NULL; crm_trace("Reaping unseen nodes..."); g_hash_table_iter_init(&iter, crm_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&node)) { if (node->last_seen != membership) { if (node->state) { /* * Calling update_peer_state_iter() allows us to * remove the node from crm_peer_cache without * invalidating our iterator */ update_peer_state_iter(__func__, node, CRM_NODE_LOST, membership, &iter); } else { crm_info("State of node %s[%u] is still unknown", node->uname, node->id); } } } } static crm_node_t * find_known_node(const char *id, const char *uname) { GHashTableIter iter; crm_node_t *node = NULL; crm_node_t *by_id = NULL; crm_node_t *by_name = NULL; if (uname) { g_hash_table_iter_init(&iter, known_node_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if (node->uname && strcasecmp(node->uname, uname) == 0) { crm_trace("Name match: %s = %p", node->uname, node); by_name = node; break; } } } if (id) { g_hash_table_iter_init(&iter, known_node_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if(strcasecmp(node->uuid, id) == 0) { crm_trace("ID match: %s= %p", id, node); by_id = node; break; } } } node = by_id; /* Good default */ if (by_id == by_name) { /* Nothing to do if they match (both NULL counts) */ crm_trace("Consistent: %p for %s/%s", by_id, id, uname); } else if (by_id == NULL && by_name) { crm_trace("Only one: %p for %s/%s", by_name, id, uname); if (id) { node = NULL; } else { node = by_name; } } else if (by_name == NULL && by_id) { crm_trace("Only one: %p for %s/%s", by_id, id, uname); if (uname) { node = NULL; } } else if (uname && by_id->uname && pcmk__str_eq(uname, by_id->uname, pcmk__str_casei)) { /* Multiple nodes have the same uname in the CIB. * Return by_id. */ } else if (id && by_name->uuid && pcmk__str_eq(id, by_name->uuid, pcmk__str_casei)) { /* Multiple nodes have the same id in the CIB. * Return by_name. */ node = by_name; } else { node = NULL; } if (node == NULL) { crm_debug("Couldn't find node%s%s%s%s", id? " " : "", id? id : "", uname? " with name " : "", uname? 
uname : ""); } return node; } static void known_node_cache_refresh_helper(xmlNode *xml_node, void *user_data) { const char *id = crm_element_value(xml_node, PCMK_XA_ID); const char *uname = crm_element_value(xml_node, PCMK_XA_UNAME); crm_node_t * node = NULL; CRM_CHECK(id != NULL && uname !=NULL, return); node = find_known_node(id, uname); if (node == NULL) { char *uniqueid = crm_generate_uuid(); node = calloc(1, sizeof(crm_node_t)); CRM_ASSERT(node != NULL); node->uname = strdup(uname); CRM_ASSERT(node->uname != NULL); node->uuid = strdup(id); CRM_ASSERT(node->uuid != NULL); g_hash_table_replace(known_node_cache, uniqueid, node); } else if (pcmk_is_set(node->flags, crm_node_dirty)) { pcmk__str_update(&node->uname, uname); /* Node is in cache and hasn't been updated already, so mark it clean */ clear_peer_flags(node, crm_node_dirty); } } static void refresh_known_node_cache(xmlNode *cib) { crm_peer_init(); g_hash_table_foreach(known_node_cache, mark_dirty, NULL); crm_foreach_xpath_result(cib, PCMK__XP_MEMBER_NODE_CONFIG, known_node_cache_refresh_helper, NULL); /* Remove all old cache entries that weren't seen in the CIB */ g_hash_table_foreach_remove(known_node_cache, is_dirty, NULL); } void pcmk__refresh_node_caches_from_cib(xmlNode *cib) { crm_remote_peer_cache_refresh(cib); refresh_known_node_cache(cib); } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include int crm_terminate_member(int nodeid, const char *uname, void *unused) { return stonith_api_kick(nodeid, uname, 120, TRUE); } int crm_terminate_member_no_mainloop(int nodeid, const char *uname, int *connection) { return stonith_api_kick(nodeid, uname, 120, TRUE); } crm_node_t * crm_get_peer(unsigned int id, const char *uname) { return pcmk__get_node(id, uname, NULL, pcmk__node_search_cluster); } crm_node_t * crm_get_peer_full(unsigned int id, const char *uname, int flags) { return pcmk__get_node(id, uname, NULL, flags); } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/common/acl.c b/lib/common/acl.c index 671ffff9b7..f3b5f0ff75 100644 --- a/lib/common/acl.c +++ b/lib/common/acl.c @@ -1,866 +1,867 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include "crmcommon_private.h" typedef struct xml_acl_s { enum xml_private_flags mode; gchar *xpath; } xml_acl_t; static void free_acl(void *data) { if (data) { xml_acl_t *acl = data; g_free(acl->xpath); free(acl); } } void pcmk__free_acls(GList *acls) { g_list_free_full(acls, free_acl); } static GList * create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode) { xml_acl_t *acl = NULL; const char *tag = crm_element_value(xml, PCMK_XA_OBJECT_TYPE); const char *ref = crm_element_value(xml, PCMK_XA_REFERENCE); const char *xpath = crm_element_value(xml, PCMK_XA_XPATH); const char *attr = crm_element_value(xml, PCMK_XA_ATTRIBUTE); if (tag == NULL) { // @COMPAT Deprecated since 1.1.12 (needed for rolling upgrades) tag = crm_element_value(xml, PCMK_XA_TAG); } if (ref == NULL) { // @COMPAT Deprecated since 1.1.12 (needed for rolling upgrades) ref = crm_element_value(xml, PCMK__XA_REF); } if ((tag == NULL) && (ref == NULL) && (xpath == NULL)) { // Schema should prevent this, but to be safe ... 
crm_trace("Ignoring ACL <%s> element without selection criteria", xml->name); return NULL; } acl = calloc(1, sizeof (xml_acl_t)); CRM_ASSERT(acl != NULL); acl->mode = mode; if (xpath) { acl->xpath = g_strdup(xpath); crm_trace("Unpacked ACL <%s> element using xpath: %s", xml->name, acl->xpath); } else { GString *buf = g_string_sized_new(128); if ((ref != NULL) && (attr != NULL)) { // NOTE: schema currently does not allow this pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), "[@" PCMK_XA_ID "='", ref, "' and @", attr, "]", NULL); } else if (ref != NULL) { pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), "[@" PCMK_XA_ID "='", ref, "']", NULL); } else if (attr != NULL) { pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), "[@", attr, "]", NULL); } else { pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), NULL); } acl->xpath = buf->str; g_string_free(buf, FALSE); crm_trace("Unpacked ACL <%s> element as xpath: %s", xml->name, acl->xpath); } return g_list_append(acls, acl); } /*! * \internal * \brief Unpack a user, group, or role subtree of the ACLs section * * \param[in] acl_top XML of entire ACLs section * \param[in] acl_entry XML of ACL element being unpacked * \param[in,out] acls List of ACLs unpacked so far * * \return New head of (possibly modified) acls * * \note This function is recursive */ static GList * parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls) { xmlNode *child = NULL; for (child = pcmk__xe_first_child(acl_entry); child; child = pcmk__xe_next(child)) { const char *tag = (const char *) child->name; const char *kind = crm_element_value(child, PCMK_XA_KIND); if (pcmk__xe_is(child, PCMK_XE_ACL_PERMISSION)) { CRM_ASSERT(kind != NULL); crm_trace("Unpacking ACL <%s> element of kind '%s'", tag, kind); tag = kind; } else { crm_trace("Unpacking ACL <%s> element", tag); } /* @COMPAT PCMK__XE_ROLE_REF was deprecated in Pacemaker 1.1.12 (needed * for rolling upgrades) */ if (pcmk__str_any_of(tag, PCMK_XE_ROLE, PCMK__XE_ROLE_REF, NULL)) { const char *ref_role = crm_element_value(child, PCMK_XA_ID); if (ref_role) { xmlNode *role = NULL; for (role = pcmk__xe_first_child(acl_top); role; role = pcmk__xe_next(role)) { if (!strcmp(PCMK_XE_ACL_ROLE, (const char *) role->name)) { const char *role_id = crm_element_value(role, PCMK_XA_ID); if (role_id && strcmp(ref_role, role_id) == 0) { crm_trace("Unpacking referenced role '%s' in ACL <%s> element", role_id, acl_entry->name); acls = parse_acl_entry(acl_top, role, acls); break; } } } } /* @COMPAT Use of a tag instead of a PCMK_XA_KIND attribute was * deprecated in 1.1.12. We still need to look for tags named * PCMK_VALUE_READ, etc., to support rolling upgrades. However, * eventually we can clean this up and make the variables more intuitive * (for example, don't assign a PCMK_XA_KIND value to the tag variable). */ } else if (strcmp(tag, PCMK_VALUE_READ) == 0) { acls = create_acl(child, acls, pcmk__xf_acl_read); } else if (strcmp(tag, PCMK_VALUE_WRITE) == 0) { acls = create_acl(child, acls, pcmk__xf_acl_write); } else if (strcmp(tag, PCMK_VALUE_DENY) == 0) { acls = create_acl(child, acls, pcmk__xf_acl_deny); } else { crm_warn("Ignoring unknown ACL %s '%s'", (kind? 
"kind" : "element"), tag); } } return acls; } /* */ static const char * acl_to_text(enum xml_private_flags flags) { if (pcmk_is_set(flags, pcmk__xf_acl_deny)) { return "deny"; } else if (pcmk_any_flags_set(flags, pcmk__xf_acl_write|pcmk__xf_acl_create)) { return "read/write"; } else if (pcmk_is_set(flags, pcmk__xf_acl_read)) { return "read"; } return "none"; } void pcmk__apply_acl(xmlNode *xml) { GList *aIter = NULL; xml_doc_private_t *docpriv = xml->doc->_private; xml_node_private_t *nodepriv; xmlXPathObjectPtr xpathObj = NULL; if (!xml_acl_enabled(xml)) { crm_trace("Skipping ACLs for user '%s' because not enabled for this XML", docpriv->user); return; } for (aIter = docpriv->acls; aIter != NULL; aIter = aIter->next) { int max = 0, lpc = 0; xml_acl_t *acl = aIter->data; xpathObj = xpath_search(xml, acl->xpath); max = numXpathResults(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); nodepriv = match->_private; pcmk__set_xml_flags(nodepriv, acl->mode); // Build a GString only if tracing is enabled pcmk__if_tracing( { GString *path = pcmk__element_xpath(match); crm_trace("Applying %s ACL to %s matched by %s", acl_to_text(acl->mode), path->str, acl->xpath); g_string_free(path, TRUE); }, {} ); } crm_trace("Applied %s ACL %s (%d match%s)", acl_to_text(acl->mode), acl->xpath, max, ((max == 1)? "" : "es")); freeXpathObject(xpathObj); } } /*! * \internal * \brief Unpack ACLs for a given user into the * metadata of the target XML tree * * Taking the description of ACLs from the source XML tree and * marking up the target XML tree with access information for the * given user by tacking it onto the relevant nodes * * \param[in] source XML with ACL definitions * \param[in,out] target XML that ACLs will be applied to * \param[in] user Username whose ACLs need to be unpacked */ void pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user) { xml_doc_private_t *docpriv = NULL; if ((target == NULL) || (target->doc == NULL) || (target->doc->_private == NULL)) { return; } docpriv = target->doc->_private; if (!pcmk_acl_required(user)) { crm_trace("Not unpacking ACLs because not required for user '%s'", user); } else if (docpriv->acls == NULL) { xmlNode *acls = get_xpath_object("//" PCMK_XE_ACLS, source, LOG_NEVER); pcmk__str_update(&docpriv->user, user); if (acls) { xmlNode *child = NULL; for (child = pcmk__xe_first_child(acls); child; child = pcmk__xe_next(child)) { /* @COMPAT PCMK__XE_ACL_USER was deprecated in Pacemaker 1.1.12 * (needed for rolling upgrades) */ if (pcmk__xe_is(child, PCMK_XE_ACL_TARGET) || pcmk__xe_is(child, PCMK__XE_ACL_USER)) { const char *id = crm_element_value(child, PCMK_XA_NAME); if (id == NULL) { id = crm_element_value(child, PCMK_XA_ID); } if (id && strcmp(id, user) == 0) { crm_debug("Unpacking ACLs for user '%s'", id); docpriv->acls = parse_acl_entry(acls, child, docpriv->acls); } } else if (pcmk__xe_is(child, PCMK_XE_ACL_GROUP)) { const char *id = crm_element_value(child, PCMK_XA_NAME); if (id == NULL) { id = crm_element_value(child, PCMK_XA_ID); } if (id && pcmk__is_user_in_group(user,id)) { crm_debug("Unpacking ACLs for group '%s'", id); docpriv->acls = parse_acl_entry(acls, child, docpriv->acls); } } } } } } /*! 
* \internal * \brief Copy source to target and set xf_acl_enabled flag in target * * \param[in] acl_source XML with ACL definitions * \param[in,out] target XML that ACLs will be applied to * \param[in] user Username whose ACLs need to be set */ void pcmk__enable_acl(xmlNode *acl_source, xmlNode *target, const char *user) { pcmk__unpack_acl(acl_source, target, user); pcmk__set_xml_doc_flag(target, pcmk__xf_acl_enabled); pcmk__apply_acl(target); } static inline bool test_acl_mode(enum xml_private_flags allowed, enum xml_private_flags requested) { if (pcmk_is_set(allowed, pcmk__xf_acl_deny)) { return false; } else if (pcmk_all_flags_set(allowed, requested)) { return true; } else if (pcmk_is_set(requested, pcmk__xf_acl_read) && pcmk_is_set(allowed, pcmk__xf_acl_write)) { return true; } else if (pcmk_is_set(requested, pcmk__xf_acl_create) && pcmk_any_flags_set(allowed, pcmk__xf_acl_write|pcmk__xf_created)) { return true; } return false; } /*! * \internal * \brief Rid XML tree of all unreadable nodes and node properties * * \param[in,out] xml Root XML node to be purged of attributes * * \return true if this node or any of its children are readable * if false is returned, xml will be freed * * \note This function is recursive */ static bool purge_xml_attributes(xmlNode *xml) { xmlNode *child = NULL; xmlAttr *xIter = NULL; bool readable_children = false; xml_node_private_t *nodepriv = xml->_private; if (test_acl_mode(nodepriv->flags, pcmk__xf_acl_read)) { crm_trace("%s[@" PCMK_XA_ID "=%s] is readable", xml->name, pcmk__xe_id(xml)); return true; } xIter = xml->properties; while (xIter != NULL) { xmlAttr *tmp = xIter; const char *prop_name = (const char *)xIter->name; xIter = xIter->next; if (strcmp(prop_name, PCMK_XA_ID) == 0) { continue; } xmlUnsetProp(xml, tmp->name); } child = pcmk__xml_first_child(xml); while ( child != NULL ) { xmlNode *tmp = child; child = pcmk__xml_next(child); readable_children |= purge_xml_attributes(tmp); } if (!readable_children) { free_xml(xml); /* Nothing readable under here, purge completely */ } return readable_children; } /*! 
* \brief Copy ACL-allowed portions of specified XML * * \param[in] user Username whose ACLs should be used * \param[in] acl_source XML containing ACLs * \param[in] xml XML to be copied * \param[out] result Copy of XML portions readable via ACLs * * \return true if xml exists and ACLs are required for user, false otherwise * \note If this returns true, caller should use \p result rather than \p xml */ bool xml_acl_filtered_copy(const char *user, xmlNode *acl_source, xmlNode *xml, xmlNode **result) { GList *aIter = NULL; xmlNode *target = NULL; xml_doc_private_t *docpriv = NULL; *result = NULL; if ((xml == NULL) || !pcmk_acl_required(user)) { crm_trace("Not filtering XML because ACLs not required for user '%s'", user); return false; } crm_trace("Filtering XML copy using user '%s' ACLs", user); target = pcmk__xml_copy(NULL, xml); if (target == NULL) { return true; } pcmk__enable_acl(acl_source, target, user); docpriv = target->doc->_private; for(aIter = docpriv->acls; aIter != NULL && target; aIter = aIter->next) { int max = 0; xml_acl_t *acl = aIter->data; if (acl->mode != pcmk__xf_acl_deny) { /* Nothing to do */ } else if (acl->xpath) { int lpc = 0; xmlXPathObjectPtr xpathObj = xpath_search(target, acl->xpath); max = numXpathResults(xpathObj); for(lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); if (!purge_xml_attributes(match) && (match == target)) { crm_trace("ACLs deny user '%s' access to entire XML document", user); freeXpathObject(xpathObj); return true; } } crm_trace("ACLs deny user '%s' access to %s (%d %s)", user, acl->xpath, max, pcmk__plural_alt(max, "match", "matches")); freeXpathObject(xpathObj); } } if (!purge_xml_attributes(target)) { crm_trace("ACLs deny user '%s' access to entire XML document", user); return true; } if (docpriv->acls) { g_list_free_full(docpriv->acls, free_acl); docpriv->acls = NULL; } else { crm_trace("User '%s' without ACLs denied access to entire XML document", user); free_xml(target); target = NULL; } if (target) { *result = target; } return true; } /*! * \internal * \brief Check whether creation of an XML element is implicitly allowed * * Check whether XML is a "scaffolding" element whose creation is implicitly * allowed regardless of ACLs (that is, it is not in the ACL section and has * no attributes other than \c PCMK_XA_ID). * * \param[in] xml XML element to check * * \return true if XML element is implicitly allowed, false otherwise */ static bool implicitly_allowed(const xmlNode *xml) { GString *path = NULL; for (xmlAttr *prop = xml->properties; prop != NULL; prop = prop->next) { if (strcmp((const char *) prop->name, PCMK_XA_ID) != 0) { return false; } } path = pcmk__element_xpath(xml); CRM_ASSERT(path != NULL); if (strstr((const char *) path->str, "/" PCMK_XE_ACLS "/") != NULL) { g_string_free(path, TRUE); return false; } g_string_free(path, TRUE); return true; } #define display_id(xml) pcmk__s(pcmk__xe_id(xml), "") /*! * \internal * \brief Drop XML nodes created in violation of ACLs * * Given an XML element, free all of its descendant nodes created in violation * of ACLs, with the exception of allowing "scaffolding" elements (i.e. those * that aren't in the ACL section and don't have any attributes other than * \c PCMK_XA_ID). 
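 *
 * For example (names and values illustrative), a newly created
 * <instance_attributes id="attrs1"/> wrapper with no attributes other than its
 * ID is treated as allowed scaffolding, while a new element that also sets
 * other attributes is subject to the normal write ACL check.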
* * \param[in,out] xml XML to check * \param[in] check_top Whether to apply checks to argument itself * (if true, xml might get freed) * * \note This function is recursive */ void pcmk__apply_creation_acl(xmlNode *xml, bool check_top) { xml_node_private_t *nodepriv = xml->_private; if (pcmk_is_set(nodepriv->flags, pcmk__xf_created)) { if (implicitly_allowed(xml)) { - crm_trace("Creation of <%s> scaffolding with id=\"%s\"" + crm_trace("Creation of <%s> scaffolding with " PCMK_XA_ID "=\"%s\"" " is implicitly allowed", xml->name, display_id(xml)); } else if (pcmk__check_acl(xml, NULL, pcmk__xf_acl_write)) { - crm_trace("ACLs allow creation of <%s> with id=\"%s\"", + crm_trace("ACLs allow creation of <%s> with " PCMK_XA_ID "=\"%s\"", xml->name, display_id(xml)); } else if (check_top) { - crm_trace("ACLs disallow creation of <%s> with id=\"%s\"", - xml->name, display_id(xml)); + crm_trace("ACLs disallow creation of <%s> with " + PCMK_XA_ID "=\"%s\"", xml->name, display_id(xml)); pcmk_free_xml_subtree(xml); return; } else { - crm_notice("ACLs would disallow creation of %s<%s> with id=\"%s\"", + crm_notice("ACLs would disallow creation of %s<%s> with " + PCMK_XA_ID "=\"%s\"", ((xml == xmlDocGetRootElement(xml->doc))? "root element " : ""), xml->name, display_id(xml)); } } for (xmlNode *cIter = pcmk__xml_first_child(xml); cIter != NULL; ) { xmlNode *child = cIter; cIter = pcmk__xml_next(cIter); /* In case it is free'd */ pcmk__apply_creation_acl(child, true); } } /*! * \brief Check whether or not an XML node is ACL-denied * * \param[in] xml node to check * * \return true if XML node exists and is ACL-denied, false otherwise */ bool xml_acl_denied(const xmlNode *xml) { if (xml && xml->doc && xml->doc->_private){ xml_doc_private_t *docpriv = xml->doc->_private; return pcmk_is_set(docpriv->flags, pcmk__xf_acl_denied); } return false; } void xml_acl_disable(xmlNode *xml) { if (xml_acl_enabled(xml)) { xml_doc_private_t *docpriv = xml->doc->_private; /* Catch anything that was created but shouldn't have been */ pcmk__apply_acl(xml); pcmk__apply_creation_acl(xml, false); pcmk__clear_xml_flags(docpriv, pcmk__xf_acl_enabled); } } /*! 
* \brief Check whether or not an XML node is ACL-enabled * * \param[in] xml node to check * * \return true if XML node exists and is ACL-enabled, false otherwise */ bool xml_acl_enabled(const xmlNode *xml) { if (xml && xml->doc && xml->doc->_private){ xml_doc_private_t *docpriv = xml->doc->_private; return pcmk_is_set(docpriv->flags, pcmk__xf_acl_enabled); } return false; } bool pcmk__check_acl(xmlNode *xml, const char *name, enum xml_private_flags mode) { CRM_ASSERT(xml); CRM_ASSERT(xml->doc); CRM_ASSERT(xml->doc->_private); if (pcmk__tracking_xml_changes(xml, false) && xml_acl_enabled(xml)) { xmlNode *parent = xml; xml_doc_private_t *docpriv = xml->doc->_private; GString *xpath = NULL; if (docpriv->acls == NULL) { pcmk__set_xml_doc_flag(xml, pcmk__xf_acl_denied); pcmk__if_tracing({}, return false); xpath = pcmk__element_xpath(xml); if (name != NULL) { pcmk__g_strcat(xpath, "[@", name, "]", NULL); } qb_log_from_external_source(__func__, __FILE__, "User '%s' without ACLs denied %s " "access to %s", LOG_TRACE, __LINE__, 0, docpriv->user, acl_to_text(mode), (const char *) xpath->str); g_string_free(xpath, TRUE); return false; } /* Walk the tree upwards looking for xml_acl_* flags * - Creating an attribute requires write permissions for the node * - Creating a child requires write permissions for the parent */ if (name) { xmlAttr *attr = xmlHasProp(xml, (pcmkXmlStr) name); if (attr && mode == pcmk__xf_acl_create) { mode = pcmk__xf_acl_write; } } while (parent && parent->_private) { xml_node_private_t *nodepriv = parent->_private; if (test_acl_mode(nodepriv->flags, mode)) { return true; } else if (pcmk_is_set(nodepriv->flags, pcmk__xf_acl_deny)) { pcmk__set_xml_doc_flag(xml, pcmk__xf_acl_denied); pcmk__if_tracing({}, return false); xpath = pcmk__element_xpath(xml); if (name != NULL) { pcmk__g_strcat(xpath, "[@", name, "]", NULL); } qb_log_from_external_source(__func__, __FILE__, "%sACL denies user '%s' %s access " "to %s", LOG_TRACE, __LINE__, 0, (parent != xml)? "Parent ": "", docpriv->user, acl_to_text(mode), (const char *) xpath->str); g_string_free(xpath, TRUE); return false; } parent = parent->parent; } pcmk__set_xml_doc_flag(xml, pcmk__xf_acl_denied); pcmk__if_tracing({}, return false); xpath = pcmk__element_xpath(xml); if (name != NULL) { pcmk__g_strcat(xpath, "[@", name, "]", NULL); } qb_log_from_external_source(__func__, __FILE__, "Default ACL denies user '%s' %s access to " "%s", LOG_TRACE, __LINE__, 0, docpriv->user, acl_to_text(mode), (const char *) xpath->str); g_string_free(xpath, TRUE); return false; } return true; } /*! * \brief Check whether ACLs are required for a given user * * \param[in] User name to check * * \return true if the user requires ACLs, false otherwise */ bool pcmk_acl_required(const char *user) { if (pcmk__str_empty(user)) { crm_trace("ACLs not required because no user set"); return false; } else if (!strcmp(user, CRM_DAEMON_USER) || !strcmp(user, "root")) { crm_trace("ACLs not required for privileged user %s", user); return false; } crm_trace("ACLs required for %s", user); return true; } char * pcmk__uid2username(uid_t uid) { char *result = NULL; struct passwd *pwent = getpwuid(uid); if (pwent == NULL) { crm_perror(LOG_INFO, "Cannot get user details for user ID %d", uid); return NULL; } pcmk__str_update(&result, pwent->pw_name); return result; } /*! 
* \internal * \brief Set the ACL user field properly on an XML request * * Multiple user names are potentially involved in an XML request: the effective * user of the current process; the user name known from an IPC client * connection; and the user name obtained from the request itself, whether by * the current standard XML attribute name or an older legacy attribute name. * This function chooses the appropriate one that should be used for ACLs, sets * it in the request (using the standard attribute name, and the legacy name if * given), and returns it. * * \param[in,out] request XML request to update * \param[in] field Alternate name for ACL user name XML attribute * \param[in] peer_user User name as known from IPC connection * * \return ACL user name actually used */ const char * pcmk__update_acl_user(xmlNode *request, const char *field, const char *peer_user) { static const char *effective_user = NULL; const char *requested_user = NULL; const char *user = NULL; if (effective_user == NULL) { effective_user = pcmk__uid2username(geteuid()); if (effective_user == NULL) { effective_user = strdup("#unprivileged"); CRM_CHECK(effective_user != NULL, return NULL); crm_err("Unable to determine effective user, assuming unprivileged for ACLs"); } } requested_user = crm_element_value(request, PCMK_XE_ACL_TARGET); if (requested_user == NULL) { /* @COMPAT rolling upgrades <=1.1.11 * * field is checked for backward compatibility with older versions that * did not use PCMK_XE_ACL_TARGET. */ requested_user = crm_element_value(request, field); } if (!pcmk__is_privileged(effective_user)) { /* We're not running as a privileged user, set or overwrite any existing * value for PCMK_XE_ACL_TARGET */ user = effective_user; } else if (peer_user == NULL && requested_user == NULL) { /* No user known or requested, use 'effective_user' and make sure one is * set for the request */ user = effective_user; } else if (peer_user == NULL) { /* No user known, trusting 'requested_user' */ user = requested_user; } else if (!pcmk__is_privileged(peer_user)) { /* The peer is not a privileged user, set or overwrite any existing * value for PCMK_XE_ACL_TARGET */ user = peer_user; } else if (requested_user == NULL) { /* Even if we're privileged, make sure there is always a value set */ user = peer_user; } else { /* Legal delegation to 'requested_user' */ user = requested_user; } // This requires pointer comparison, not string comparison if (user != crm_element_value(request, PCMK_XE_ACL_TARGET)) { crm_xml_add(request, PCMK_XE_ACL_TARGET, user); } if (field != NULL && user != crm_element_value(request, field)) { crm_xml_add(request, field, user); } return requested_user; } diff --git a/lib/common/xml.c b/lib/common/xml.c index dd87f98ced..8e15afb2f8 100644 --- a/lib/common/xml.c +++ b/lib/common/xml.c @@ -1,2269 +1,2270 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include // stat(), S_ISREG, etc. #include #include #include #include #include #include // PCMK__XML_LOG_BASE, etc. 
#include "crmcommon_private.h" // Define this as 1 in development to get insanely verbose trace messages #ifndef XML_PARSER_DEBUG #define XML_PARSER_DEBUG 0 #endif bool pcmk__tracking_xml_changes(xmlNode *xml, bool lazy) { if(xml == NULL || xml->doc == NULL || xml->doc->_private == NULL) { return FALSE; } else if (!pcmk_is_set(((xml_doc_private_t *)xml->doc->_private)->flags, pcmk__xf_tracking)) { return FALSE; } else if (lazy && !pcmk_is_set(((xml_doc_private_t *)xml->doc->_private)->flags, pcmk__xf_lazy)) { return FALSE; } return TRUE; } static inline void set_parent_flag(xmlNode *xml, long flag) { for(; xml; xml = xml->parent) { xml_node_private_t *nodepriv = xml->_private; if (nodepriv == NULL) { /* During calls to xmlDocCopyNode(), _private will be unset for parent nodes */ } else { pcmk__set_xml_flags(nodepriv, flag); } } } void pcmk__set_xml_doc_flag(xmlNode *xml, enum xml_private_flags flag) { if(xml && xml->doc && xml->doc->_private){ /* During calls to xmlDocCopyNode(), xml->doc may be unset */ xml_doc_private_t *docpriv = xml->doc->_private; pcmk__set_xml_flags(docpriv, flag); } } // Mark document, element, and all element's parents as changed void pcmk__mark_xml_node_dirty(xmlNode *xml) { pcmk__set_xml_doc_flag(xml, pcmk__xf_dirty); set_parent_flag(xml, pcmk__xf_dirty); } // Clear flags on XML node and its children static void reset_xml_node_flags(xmlNode *xml) { xmlNode *cIter = NULL; xml_node_private_t *nodepriv = xml->_private; if (nodepriv) { nodepriv->flags = 0; } for (cIter = pcmk__xml_first_child(xml); cIter != NULL; cIter = pcmk__xml_next(cIter)) { reset_xml_node_flags(cIter); } } // Set xpf_created flag on XML node and any children void pcmk__mark_xml_created(xmlNode *xml) { xmlNode *cIter = NULL; xml_node_private_t *nodepriv = NULL; CRM_ASSERT(xml != NULL); nodepriv = xml->_private; if (nodepriv && pcmk__tracking_xml_changes(xml, FALSE)) { if (!pcmk_is_set(nodepriv->flags, pcmk__xf_created)) { pcmk__set_xml_flags(nodepriv, pcmk__xf_created); pcmk__mark_xml_node_dirty(xml); } for (cIter = pcmk__xml_first_child(xml); cIter != NULL; cIter = pcmk__xml_next(cIter)) { pcmk__mark_xml_created(cIter); } } } #define XML_DOC_PRIVATE_MAGIC 0x81726354UL #define XML_NODE_PRIVATE_MAGIC 0x54637281UL // Free an XML object previously marked as deleted static void free_deleted_object(void *data) { if(data) { pcmk__deleted_xml_t *deleted_obj = data; free(deleted_obj->path); free(deleted_obj); } } // Free and NULL user, ACLs, and deleted objects in an XML node's private data static void reset_xml_private_data(xml_doc_private_t *docpriv) { if (docpriv != NULL) { CRM_ASSERT(docpriv->check == XML_DOC_PRIVATE_MAGIC); free(docpriv->user); docpriv->user = NULL; if (docpriv->acls != NULL) { pcmk__free_acls(docpriv->acls); docpriv->acls = NULL; } if(docpriv->deleted_objs) { g_list_free_full(docpriv->deleted_objs, free_deleted_object); docpriv->deleted_objs = NULL; } } } // Free all private data associated with an XML node static void free_private_data(xmlNode *node) { /* Note: This function frees private data assosciated with an XML node, unless the function is being called as a result of internal XSLT cleanup. That could happen through, for example, the following chain of function calls: xsltApplyStylesheetInternal -> xsltFreeTransformContext -> xsltFreeRVTs -> xmlFreeDoc And in that case, the node would fulfill three conditions: 1. It would be a standalone document (i.e. it wouldn't be part of a document) 2. 
It would have a space-prefixed name (for reference, please see xsltInternals.h: XSLT_MARK_RES_TREE_FRAG) 3. It would carry its own payload in the _private field. We do not free data in this circumstance to avoid a failed assertion on the XML_*_PRIVATE_MAGIC later. */ if (node->name == NULL || node->name[0] != ' ') { if (node->_private) { if (node->type == XML_DOCUMENT_NODE) { reset_xml_private_data(node->_private); } else { CRM_ASSERT(((xml_node_private_t *) node->_private)->check == XML_NODE_PRIVATE_MAGIC); /* nothing dynamically allocated nested */ } free(node->_private); node->_private = NULL; } } } // Allocate and initialize private data for an XML node static void new_private_data(xmlNode *node) { switch (node->type) { case XML_DOCUMENT_NODE: { xml_doc_private_t *docpriv = NULL; docpriv = calloc(1, sizeof(xml_doc_private_t)); CRM_ASSERT(docpriv != NULL); docpriv->check = XML_DOC_PRIVATE_MAGIC; /* Flags will be reset if necessary when tracking is enabled */ pcmk__set_xml_flags(docpriv, pcmk__xf_dirty|pcmk__xf_created); node->_private = docpriv; break; } case XML_ELEMENT_NODE: case XML_ATTRIBUTE_NODE: case XML_COMMENT_NODE: { xml_node_private_t *nodepriv = NULL; nodepriv = calloc(1, sizeof(xml_node_private_t)); CRM_ASSERT(nodepriv != NULL); nodepriv->check = XML_NODE_PRIVATE_MAGIC; /* Flags will be reset if necessary when tracking is enabled */ pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_created); node->_private = nodepriv; if (pcmk__tracking_xml_changes(node, FALSE)) { /* XML_ELEMENT_NODE doesn't get picked up here, node->doc is * not hooked up at the point we are called */ pcmk__mark_xml_node_dirty(node); } break; } case XML_TEXT_NODE: case XML_DTD_NODE: case XML_CDATA_SECTION_NODE: break; default: /* Ignore */ crm_trace("Ignoring %p %d", node, node->type); CRM_LOG_ASSERT(node->type == XML_ELEMENT_NODE); break; } } void xml_track_changes(xmlNode * xml, const char *user, xmlNode *acl_source, bool enforce_acls) { xml_accept_changes(xml); crm_trace("Tracking changes%s to %p", enforce_acls?" with ACLs":"", xml); pcmk__set_xml_doc_flag(xml, pcmk__xf_tracking); if(enforce_acls) { if(acl_source == NULL) { acl_source = xml; } pcmk__set_xml_doc_flag(xml, pcmk__xf_acl_enabled); pcmk__unpack_acl(acl_source, xml, user); pcmk__apply_acl(xml); } } bool xml_tracking_changes(xmlNode * xml) { return (xml != NULL) && (xml->doc != NULL) && (xml->doc->_private != NULL) && pcmk_is_set(((xml_doc_private_t *)(xml->doc->_private))->flags, pcmk__xf_tracking); } bool xml_document_dirty(xmlNode *xml) { return (xml != NULL) && (xml->doc != NULL) && (xml->doc->_private != NULL) && pcmk_is_set(((xml_doc_private_t *)(xml->doc->_private))->flags, pcmk__xf_dirty); } /*! 
* \internal * \brief Return ordinal position of an XML node among its siblings * * \param[in] xml XML node to check * \param[in] ignore_if_set Don't count siblings with this flag set * * \return Ordinal position of \p xml (starting with 0) */ int pcmk__xml_position(const xmlNode *xml, enum xml_private_flags ignore_if_set) { int position = 0; for (const xmlNode *cIter = xml; cIter->prev; cIter = cIter->prev) { xml_node_private_t *nodepriv = ((xmlNode*)cIter->prev)->_private; if (!pcmk_is_set(nodepriv->flags, ignore_if_set)) { position++; } } return position; } // Remove all attributes marked as deleted from an XML node static void accept_attr_deletions(xmlNode *xml) { // Clear XML node's flags ((xml_node_private_t *) xml->_private)->flags = pcmk__xf_none; // Remove this XML node's attributes that were marked as deleted pcmk__xe_remove_matching_attrs(xml, pcmk__marked_as_deleted, NULL); // Recursively do the same for this XML node's children for (xmlNodePtr cIter = pcmk__xml_first_child(xml); cIter != NULL; cIter = pcmk__xml_next(cIter)) { accept_attr_deletions(cIter); } } /*! * \internal * \brief Find first child XML node matching another given XML node * * \param[in] haystack XML whose children should be checked * \param[in] needle XML to match (comment content or element name and ID) * \param[in] exact If true and needle is a comment, position must match */ xmlNode * pcmk__xml_match(const xmlNode *haystack, const xmlNode *needle, bool exact) { CRM_CHECK(needle != NULL, return NULL); if (needle->type == XML_COMMENT_NODE) { return pcmk__xc_match(haystack, needle, exact); } else { const char *id = pcmk__xe_id(needle); const char *attr = (id == NULL)? NULL : PCMK_XA_ID; return pcmk__xe_match(haystack, (const char *) needle->name, attr, id); } } void xml_accept_changes(xmlNode * xml) { xmlNode *top = NULL; xml_doc_private_t *docpriv = NULL; if(xml == NULL) { return; } crm_trace("Accepting changes to %p", xml); docpriv = xml->doc->_private; top = xmlDocGetRootElement(xml->doc); reset_xml_private_data(xml->doc->_private); if (!pcmk_is_set(docpriv->flags, pcmk__xf_dirty)) { docpriv->flags = pcmk__xf_none; return; } docpriv->flags = pcmk__xf_none; accept_attr_deletions(top); } xmlNode * find_xml_node(const xmlNode *root, const char *search_path, gboolean must_find) { xmlNode *a_child = NULL; const char *name = (root == NULL)? "<NULL>" : (const char *) root->name; if (search_path == NULL) { crm_warn("Will never find <NULL>"); return NULL; } for (a_child = pcmk__xml_first_child(root); a_child != NULL; a_child = pcmk__xml_next(a_child)) { if (strcmp((const char *)a_child->name, search_path) == 0) { return a_child; } } if (must_find) { crm_warn("Could not find %s in %s.", search_path, name); } else if (root != NULL) { crm_trace("Could not find %s in %s.", search_path, name); } else { crm_trace("Could not find %s in <NULL>.", search_path); } return NULL; } #define attr_matches(c, n, v) pcmk__str_eq(crm_element_value((c), (n)), \ (v), pcmk__str_none) /*! * \internal * \brief Find first XML child element matching given criteria * * \param[in] parent XML element to search * \param[in] node_name If not NULL, only match children of this type * \param[in] attr_n If not NULL, only match children with an attribute * of this name. 
* \param[in] attr_v If \p attr_n and this are not NULL, only match children * with an attribute named \p attr_n and this value * * \return Matching XML child element, or NULL if none found */ xmlNode * pcmk__xe_match(const xmlNode *parent, const char *node_name, const char *attr_n, const char *attr_v) { CRM_CHECK(parent != NULL, return NULL); CRM_CHECK(attr_v == NULL || attr_n != NULL, return NULL); for (xmlNode *child = pcmk__xe_first_child(parent); child != NULL; child = pcmk__xe_next(child)) { if (((node_name == NULL) || pcmk__xe_is(child, node_name)) && ((attr_n == NULL) || (attr_v == NULL && xmlHasProp(child, (pcmkXmlStr) attr_n)) || (attr_v != NULL && attr_matches(child, attr_n, attr_v)))) { return child; } } crm_trace("XML child node <%s%s%s%s%s> not found in %s", (node_name? node_name : "(any)"), (attr_n? " " : ""), (attr_n? attr_n : ""), (attr_n? "=" : ""), (attr_n? attr_v : ""), (const char *) parent->name); return NULL; } void copy_in_properties(xmlNode *target, const xmlNode *src) { if (src == NULL) { crm_warn("No node to copy properties from"); } else if (target == NULL) { crm_err("No node to copy properties into"); } else { for (xmlAttrPtr a = pcmk__xe_first_attr(src); a != NULL; a = a->next) { const char *p_name = (const char *) a->name; const char *p_value = pcmk__xml_attr_value(a); expand_plus_plus(target, p_name, p_value); if (xml_acl_denied(target)) { crm_trace("Cannot copy %s=%s to %s", p_name, p_value, target->name); return; } } } return; } /*! * \brief Parse integer assignment statements on this node and all its child * nodes * * \param[in,out] target Root XML node to be processed * * \note This function is recursive */ void fix_plus_plus_recursive(xmlNode *target) { /* TODO: Remove recursion and use xpath searches for value++ */ xmlNode *child = NULL; for (xmlAttrPtr a = pcmk__xe_first_attr(target); a != NULL; a = a->next) { const char *p_name = (const char *) a->name; const char *p_value = pcmk__xml_attr_value(a); expand_plus_plus(target, p_name, p_value); } for (child = pcmk__xe_first_child(target); child != NULL; child = pcmk__xe_next(child)) { fix_plus_plus_recursive(child); } } /*! * \brief Update current XML attribute value per parsed integer assignment statement * * \param[in,out] target an XML node, containing a XML attribute that is * initialized to some numeric value, to be processed * \param[in] name name of the XML attribute, e.g. X, whose value * should be updated * \param[in] value assignment statement, e.g. "X++" or * "X+=5", to be applied to the initialized value. * * \note The original XML attribute value is treated as 0 if non-numeric and * truncated to be an integer if decimal-point-containing. * \note The final XML attribute value is truncated to not exceed 1000000. * \note Undefined behavior if unexpected input. 
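 *
 * \par Example (illustrative)
 * \code
 *     // Assuming xml is an element with fail-count="2" (names and values
 *     // are examples only):
 *     expand_plus_plus(xml, "fail-count", "fail-count++");  // becomes "3"
 *     expand_plus_plus(xml, "fail-count", "fail-count+=5"); // becomes "8"
 * \endcode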
*/ void expand_plus_plus(xmlNode * target, const char *name, const char *value) { int offset = 1; int name_len = 0; int int_value = 0; int value_len = 0; const char *old_value = NULL; if (target == NULL || value == NULL || name == NULL) { return; } old_value = crm_element_value(target, name); if (old_value == NULL) { /* if no previous value, set unexpanded */ goto set_unexpanded; } else if (strstr(value, name) != value) { goto set_unexpanded; } name_len = strlen(name); value_len = strlen(value); if (value_len < (name_len + 2) || value[name_len] != '+' || (value[name_len + 1] != '+' && value[name_len + 1] != '=')) { goto set_unexpanded; } /* if we are expanding ourselves, * then no previous value was set and leave int_value as 0 */ if (old_value != value) { int_value = char2score(old_value); } if (value[name_len + 1] != '+') { const char *offset_s = value + (name_len + 2); offset = char2score(offset_s); } int_value += offset; if (int_value > PCMK_SCORE_INFINITY) { int_value = PCMK_SCORE_INFINITY; } crm_xml_add_int(target, name, int_value); return; set_unexpanded: if (old_value == value) { /* the old value is already set, nothing to do */ return; } crm_xml_add(target, name, value); return; } /*! * \internal * \brief Remove an XML element's attributes that match some criteria * * \param[in,out] element XML element to modify * \param[in] match If not NULL, only remove attributes for which * this function returns true * \param[in,out] user_data Data to pass to \p match */ void pcmk__xe_remove_matching_attrs(xmlNode *element, bool (*match)(xmlAttrPtr, void *), void *user_data) { xmlAttrPtr next = NULL; for (xmlAttrPtr a = pcmk__xe_first_attr(element); a != NULL; a = next) { next = a->next; // Grab now because attribute might get removed if ((match == NULL) || match(a, user_data)) { if (!pcmk__check_acl(element, NULL, pcmk__xf_acl_write)) { crm_trace("ACLs prevent removal of attributes (%s and " "possibly others) from %s element", (const char *) a->name, (const char *) element->name); return; // ACLs apply to element, not particular attributes } if (pcmk__tracking_xml_changes(element, false)) { // Leave (marked for removal) until after diff is calculated set_parent_flag(element, pcmk__xf_dirty); pcmk__set_xml_flags((xml_node_private_t *) a->_private, pcmk__xf_deleted); } else { xmlRemoveProp(a); } } } } xmlNode * create_xml_node(xmlNode * parent, const char *name) { xmlDoc *doc = NULL; xmlNode *node = NULL; if (pcmk__str_empty(name)) { CRM_CHECK(name != NULL && name[0] == 0, return NULL); return NULL; } if (parent == NULL) { doc = xmlNewDoc(PCMK__XML_VERSION); if (doc == NULL) { return NULL; } node = xmlNewDocRawNode(doc, NULL, (pcmkXmlStr) name, NULL); if (node == NULL) { xmlFreeDoc(doc); return NULL; } xmlDocSetRootElement(doc, node); } else { node = xmlNewChild(parent, NULL, (pcmkXmlStr) name, NULL); if (node == NULL) { return NULL; } } pcmk__mark_xml_created(node); return node; } /*! * \internal * \brief Set a given string as an XML node's content * * \param[in,out] node Node whose content to set * \param[in] content String to set as the content * * \note \c xmlNodeSetContent() does not escape special characters. 
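 *
 * \par Example (illustrative)
 * \code
 *     // The content is escaped first, so the string actually passed to
 *     // xmlNodeSetContent() here is "a &lt; b &amp; c":
 *     pcmk__xe_set_content(node, "a < b & c");
 * \endcode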
*/ void pcmk__xe_set_content(xmlNode *node, const char *content) { if (node != NULL) { char *escaped = pcmk__xml_escape(content, false); xmlNodeSetContent(node, (pcmkXmlStr) escaped); free(escaped); } } xmlNode * pcmk_create_xml_text_node(xmlNode * parent, const char *name, const char *content) { xmlNode *node = create_xml_node(parent, name); pcmk__xe_set_content(node, content); return node; } xmlNode * pcmk_create_html_node(xmlNode * parent, const char *element_name, const char *id, const char *class_name, const char *text) { xmlNode *node = pcmk_create_xml_text_node(parent, element_name, text); if (class_name != NULL) { crm_xml_add(node, PCMK_XA_CLASS, class_name); } if (id != NULL) { crm_xml_add(node, PCMK_XA_ID, id); } return node; } /*! * Free an XML element and all of its children, removing it from its parent * * \param[in,out] xml XML element to free */ void pcmk_free_xml_subtree(xmlNode *xml) { xmlUnlinkNode(xml); // Detaches from parent and siblings xmlFreeNode(xml); // Frees } static void free_xml_with_position(xmlNode * child, int position) { if (child != NULL) { xmlNode *top = NULL; xmlDoc *doc = child->doc; xml_node_private_t *nodepriv = child->_private; xml_doc_private_t *docpriv = NULL; if (doc != NULL) { top = xmlDocGetRootElement(doc); } if (doc != NULL && top == child) { /* Free everything */ xmlFreeDoc(doc); } else if (pcmk__check_acl(child, NULL, pcmk__xf_acl_write) == FALSE) { GString *xpath = NULL; pcmk__if_tracing({}, return); xpath = pcmk__element_xpath(child); qb_log_from_external_source(__func__, __FILE__, "Cannot remove %s %x", LOG_TRACE, __LINE__, 0, (const char *) xpath->str, nodepriv->flags); g_string_free(xpath, TRUE); return; } else { if (doc && pcmk__tracking_xml_changes(child, FALSE) && !pcmk_is_set(nodepriv->flags, pcmk__xf_created)) { GString *xpath = pcmk__element_xpath(child); if (xpath != NULL) { pcmk__deleted_xml_t *deleted_obj = NULL; crm_trace("Deleting %s %p from %p", (const char *) xpath->str, child, doc); deleted_obj = calloc(1, sizeof(pcmk__deleted_xml_t)); deleted_obj->path = strdup((const char *) xpath->str); CRM_ASSERT(deleted_obj->path != NULL); g_string_free(xpath, TRUE); deleted_obj->position = -1; /* Record the "position" only for XML comments for now */ if (child->type == XML_COMMENT_NODE) { if (position >= 0) { deleted_obj->position = position; } else { deleted_obj->position = pcmk__xml_position(child, pcmk__xf_skip); } } docpriv = doc->_private; docpriv->deleted_objs = g_list_append(docpriv->deleted_objs, deleted_obj); pcmk__set_xml_doc_flag(child, pcmk__xf_dirty); } } pcmk_free_xml_subtree(child); } } } void free_xml(xmlNode * child) { free_xml_with_position(child, -1); } /*! * \internal * \brief Make a deep copy of an XML node under a given parent * * \param[in,out] parent XML element that will be the copy's parent (\c NULL * to create a new XML document with the copy as root) * \param[in] src XML node to copy * * \return Deep copy of \p src, or \c NULL if \p src is \c NULL */ xmlNode * pcmk__xml_copy(xmlNode *parent, xmlNode *src) { xmlNode *copy = NULL; if (src == NULL) { return NULL; } if (parent == NULL) { xmlDoc *doc = NULL; // The copy will be the root element of a new document CRM_ASSERT(src->type == XML_ELEMENT_NODE); doc = xmlNewDoc(PCMK__XML_VERSION); CRM_ASSERT(doc != NULL); copy = xmlDocCopyNode(src, doc, 1); CRM_ASSERT(copy != NULL); xmlDocSetRootElement(doc, copy); } else { copy = xmlDocCopyNode(src, parent->doc, 1); CRM_ASSERT(copy != NULL); xmlAddChild(parent, copy); } pcmk__mark_xml_created(copy); return copy; } /*! 
* \internal * \brief Remove XML text nodes from specified XML and all its children * * \param[in,out] xml XML to strip text from */ void pcmk__strip_xml_text(xmlNode *xml) { xmlNode *iter = xml->children; while (iter) { xmlNode *next = iter->next; switch (iter->type) { case XML_TEXT_NODE: /* Remove it */ pcmk_free_xml_subtree(iter); break; case XML_ELEMENT_NODE: /* Search it */ pcmk__strip_xml_text(iter); break; default: /* Leave it */ break; } iter = next; } } /*! * \internal * \brief Add a "last written" attribute to an XML element, set to current time * * \param[in,out] xe XML element to add attribute to * * \return Value that was set, or NULL on error */ const char * pcmk__xe_add_last_written(xmlNode *xe) { char *now_s = pcmk__epoch2str(NULL, 0); const char *result = NULL; result = crm_xml_add(xe, PCMK_XA_CIB_LAST_WRITTEN, pcmk__s(now_s, "Could not determine current time")); free(now_s); return result; } /*! * \brief Sanitize a string so it is usable as an XML ID * * \param[in,out] id String to sanitize */ void crm_xml_sanitize_id(char *id) { char *c; for (c = id; *c; ++c) { /* @TODO Sanitize more comprehensively */ switch (*c) { case ':': case '#': *c = '.'; } } } /*! * \brief Set the ID of an XML element using a format * * \param[in,out] xml XML element * \param[in] fmt printf-style format * \param[in] ... any arguments required by format */ void crm_xml_set_id(xmlNode *xml, const char *format, ...) { va_list ap; int len = 0; char *id = NULL; /* equivalent to crm_strdup_printf() */ va_start(ap, format); len = vasprintf(&id, format, ap); va_end(ap); CRM_ASSERT(len > 0); crm_xml_sanitize_id(id); crm_xml_add(xml, PCMK_XA_ID, id); free(id); } /*! * \internal * \brief Get consecutive bytes encoding non-ASCII UTF-8 characters * * \param[in] text String to check * * \return Number of non-ASCII UTF-8 bytes at the beginning of \p text */ static size_t utf8_bytes(const char *text) { // Total number of consecutive bytes containing UTF-8 characters size_t c_bytes = 0; if (text == NULL) { return 0; } /* UTF-8 uses one to four 8-bit bytes per character. The first byte * indicates the width of the character. A byte beginning with a '0' bit is * a one-byte ASCII character. * * A C byte is 8 bits on most systems, but this is not guaranteed. * * Count until we find an ASCII character or an invalid byte. Check bytes * aligned with the C byte boundary. */ for (const uint8_t *utf8_byte = (const uint8_t *) text; (*utf8_byte & 0x80) != 0; utf8_byte = (const uint8_t *) (text + c_bytes)) { size_t utf8_bits = 0; if ((*utf8_byte & 0xf0) == 0xf0) { // Four-byte character (first byte: 11110xxx) utf8_bits = 32; } else if ((*utf8_byte & 0xe0) == 0xe0) { // Three-byte character (first byte: 1110xxxx) utf8_bits = 24; } else if ((*utf8_byte & 0xc0) == 0xc0) { // Two-byte character (first byte: 110xxxxx) utf8_bits = 16; } else { crm_warn("Found invalid UTF-8 character %.2x", (unsigned char) *utf8_byte); return c_bytes; } c_bytes += utf8_bits / CHAR_BIT; #if (CHAR_BIT != 8) // Coverity complains about dead code without this CPP guard if ((utf8_bits % CHAR_BIT) > 0) { c_bytes++; } #endif // CHAR_BIT != 8 } return c_bytes; } /*! 
* \internal * \brief Replace a character in a dynamically allocated string, reallocating * memory * * \param[in,out] text String to replace a character in * \param[in,out] index Index of character to replace with new string; on * return, reset to index of end of replacement string * \param[in,out] length Length of \p text * \param[in] replace String to replace character at \p index with (must * not be empty) * * \return \p text, with the character at \p index replaced by \p replace */ static char * replace_text(char *text, size_t *index, size_t *length, const char *replace) { /* @TODO Replace with GString? Or at least copy char-by-char, escaping * characters as needed, instead of shifting characters on every replacement */ // We have space for 1 char already size_t offset = strlen(replace) - 1; if (offset > 0) { *length += offset; text = pcmk__realloc(text, *length + 1); // Shift characters to the right to make room for the replacement string for (size_t i = *length; i > (*index + offset); i--) { text[i] = text[i - offset]; } } // Replace the character at index by the replacement string memcpy(text + *index, replace, offset + 1); // Reset index to the end of replacement string *index += offset; return text; } /*! * \internal * \brief Check whether a string has XML special characters that must be escaped * * See \c pcmk__xml_escape() for more details. * * \param[in] text String to check * \param[in] escape_quote If \c true, double quotes must be escaped * * \return \c true if \p text has special characters that need to be escaped, or * \c false otherwise */ bool pcmk__xml_needs_escape(const char *text, bool escape_quote) { size_t length = 0; if (text == NULL) { return false; } length = strlen(text); for (size_t index = 0; index < length; index++) { // Don't escape any non-ASCII characters index += utf8_bytes(&(text[index])); switch (text[index]) { case '\0': // Reached end of string by skipping UTF-8 bytes return false; case '<': return true; case '>': // Not necessary, but for symmetry with '<' return true; case '&': return true; case '"': if (escape_quote) { return true; } break; case '\n': case '\t': // Don't escape newline or tab break; default: if ((text[index] < 0x20) || (text[index] >= 0x7f)) { // Escape non-printing characters return true; } break; } } return false; } /*! * \internal * \brief Replace special characters with their XML escape sequences * * XML allows the escaping of special characters by replacing them with entity * references (for example, """) or character references (for * example, " "). * * The special characters '<' and '&' are not allowed in their * literal forms in XML character data. Character data is non-markup text (for * example, the content of a text node). * * Additionally, if an attribute value is delimited by single quotes, then * single quotes must be escaped within the value. Similarly, if an attribute * value is delimited by double quotes, then double quotes must be escaped * within the value. * * For more details, see the "Character Data and Markup" section of the XML * spec, currently section 2.4: * https://www.w3.org/TR/xml/#dt-markup * * Pacemaker always delimits attribute values with double quotes, so this * function doesn't escape single quotes. * * \param[in] text Text to escape * \param[in] escape_quote If \c true, escape double quotes (should be enabled * for attribute values) * * \return Newly allocated string equivalent to \p text but with special * characters replaced with XML escape sequences (or \c NULL if \p text * is \c NULL). 
If \p text is not \c NULL, the return value is * guaranteed not to be \c NULL. * * \note There are libxml functions that purport to do this: * \c xmlEncodeEntitiesReentrant() and \c xmlEncodeSpecialChars(). * However, their escaping is incomplete. See: * https://discourse.gnome.org/t/intended-use-of-xmlencodeentitiesreentrant-vs-xmlencodespecialchars/19252 */ char * pcmk__xml_escape(const char *text, bool escape_quote) { size_t length = 0; char *copy = NULL; char buf[32] = { '\0', }; if (text == NULL) { return NULL; } length = strlen(text); pcmk__str_update(&copy, text); for (size_t index = 0; index < length; index++) { // Don't escape any non-ASCII characters index += utf8_bytes(&(copy[index])); switch (copy[index]) { case '\0': // Reached end of string by skipping UTF-8 bytes break; case '<': copy = replace_text(copy, &index, &length, "&lt;"); break; case '>': // Not necessary, but for symmetry with '<' copy = replace_text(copy, &index, &length, "&gt;"); break; case '&': copy = replace_text(copy, &index, &length, "&amp;"); break; case '"': if (escape_quote) { copy = replace_text(copy, &index, &length, "&quot;"); } break; case '\n': case '\t': // Don't escape newlines and tabs break; default: if ((copy[index] < 0x20) || (copy[index] >= 0x7f)) { // Escape non-printing characters snprintf(buf, sizeof(buf), "&#x%.2x;", copy[index]); copy = replace_text(copy, &index, &length, buf); } break; } } return copy; } void xml_remove_prop(xmlNode * obj, const char *name) { if (crm_element_value(obj, name) == NULL) { return; } if (pcmk__check_acl(obj, NULL, pcmk__xf_acl_write) == FALSE) { crm_trace("Cannot remove %s from %s", name, obj->name); } else if (pcmk__tracking_xml_changes(obj, FALSE)) { /* Leave in place (marked for removal) until after the diff is calculated */ xmlAttr *attr = xmlHasProp(obj, (pcmkXmlStr) name); xml_node_private_t *nodepriv = attr->_private; set_parent_flag(obj, pcmk__xf_dirty); pcmk__set_xml_flags(nodepriv, pcmk__xf_deleted); } else { xmlUnsetProp(obj, (pcmkXmlStr) name); } } /*! * \internal * \brief Set a flag on all attributes of an XML element * * \param[in,out] xml XML node to set flags on * \param[in] flag XML private flag to set */ static void set_attrs_flag(xmlNode *xml, enum xml_private_flags flag) { for (xmlAttr *attr = pcmk__xe_first_attr(xml); attr; attr = attr->next) { pcmk__set_xml_flags((xml_node_private_t *) (attr->_private), flag); } } /*! * \internal * \brief Add an XML attribute to a node, marked as deleted * * When calculating XML changes, we need to know when an attribute has been * deleted. Add the attribute back to the new XML, so that we can check the * removal against ACLs, and mark it as deleted for later removal after * differences have been calculated. 
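 *
 * For example (attribute name and value illustrative), if the old XML had
 * last-failure="5" and the new XML no longer has that attribute, it is
 * re-added with value "5", checked against the user's write ACLs, and flagged
 * as deleted so that the calculated diff records its removal.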
* * \param[in,out] new_xml XML to modify * \param[in] element Name of XML element that changed (for logging) * \param[in] attr_name Name of attribute that was deleted * \param[in] old_value Value of attribute that was deleted */ static void mark_attr_deleted(xmlNode *new_xml, const char *element, const char *attr_name, const char *old_value) { xml_doc_private_t *docpriv = new_xml->doc->_private; xmlAttr *attr = NULL; xml_node_private_t *nodepriv; // Prevent the dirty flag being set recursively upwards pcmk__clear_xml_flags(docpriv, pcmk__xf_tracking); // Restore the old value (and the tracking flag) attr = xmlSetProp(new_xml, (pcmkXmlStr) attr_name, (pcmkXmlStr) old_value); pcmk__set_xml_flags(docpriv, pcmk__xf_tracking); // Reset flags (so the attribute doesn't appear as newly created) nodepriv = attr->_private; nodepriv->flags = 0; // Check ACLs and mark restored value for later removal xml_remove_prop(new_xml, attr_name); crm_trace("XML attribute %s=%s was removed from %s", attr_name, old_value, element); } /* * \internal * \brief Check ACLs for a changed XML attribute */ static void mark_attr_changed(xmlNode *new_xml, const char *element, const char *attr_name, const char *old_value) { char *vcopy = crm_element_value_copy(new_xml, attr_name); crm_trace("XML attribute %s was changed from '%s' to '%s' in %s", attr_name, old_value, vcopy, element); // Restore the original value xmlSetProp(new_xml, (pcmkXmlStr) attr_name, (pcmkXmlStr) old_value); // Change it back to the new value, to check ACLs crm_xml_add(new_xml, attr_name, vcopy); free(vcopy); } /*! * \internal * \brief Mark an XML attribute as having changed position * * \param[in,out] new_xml XML to modify * \param[in] element Name of XML element that changed (for logging) * \param[in,out] old_attr Attribute that moved, in original XML * \param[in,out] new_attr Attribute that moved, in \p new_xml * \param[in] p_old Ordinal position of \p old_attr in original XML * \param[in] p_new Ordinal position of \p new_attr in \p new_xml */ static void mark_attr_moved(xmlNode *new_xml, const char *element, xmlAttr *old_attr, xmlAttr *new_attr, int p_old, int p_new) { xml_node_private_t *nodepriv = new_attr->_private; crm_trace("XML attribute %s moved from position %d to %d in %s", old_attr->name, p_old, p_new, element); // Mark document, element, and all element's parents as changed pcmk__mark_xml_node_dirty(new_xml); // Mark attribute as changed pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_moved); nodepriv = (p_old > p_new)? old_attr->_private : new_attr->_private; pcmk__set_xml_flags(nodepriv, pcmk__xf_skip); } /*! 
* \internal * \brief Calculate differences in all previously existing XML attributes * * \param[in,out] old_xml Original XML to compare * \param[in,out] new_xml New XML to compare */ static void xml_diff_old_attrs(xmlNode *old_xml, xmlNode *new_xml) { xmlAttr *attr_iter = pcmk__xe_first_attr(old_xml); while (attr_iter != NULL) { const char *name = (const char *) attr_iter->name; xmlAttr *old_attr = attr_iter; xmlAttr *new_attr = xmlHasProp(new_xml, attr_iter->name); const char *old_value = pcmk__xml_attr_value(attr_iter); attr_iter = attr_iter->next; if (new_attr == NULL) { mark_attr_deleted(new_xml, (const char *) old_xml->name, name, old_value); } else { xml_node_private_t *nodepriv = new_attr->_private; int new_pos = pcmk__xml_position((xmlNode*) new_attr, pcmk__xf_skip); int old_pos = pcmk__xml_position((xmlNode*) old_attr, pcmk__xf_skip); const char *new_value = crm_element_value(new_xml, name); // This attribute isn't new pcmk__clear_xml_flags(nodepriv, pcmk__xf_created); if (strcmp(new_value, old_value) != 0) { mark_attr_changed(new_xml, (const char *) old_xml->name, name, old_value); } else if ((old_pos != new_pos) && !pcmk__tracking_xml_changes(new_xml, TRUE)) { mark_attr_moved(new_xml, (const char *) old_xml->name, old_attr, new_attr, old_pos, new_pos); } } } } /*! * \internal * \brief Check all attributes in new XML for creation * * For each of a given XML element's attributes marked as newly created, accept * (and mark as dirty) or reject the creation according to ACLs. * * \param[in,out] new_xml XML to check */ static void mark_created_attrs(xmlNode *new_xml) { xmlAttr *attr_iter = pcmk__xe_first_attr(new_xml); while (attr_iter != NULL) { xmlAttr *new_attr = attr_iter; xml_node_private_t *nodepriv = attr_iter->_private; attr_iter = attr_iter->next; if (pcmk_is_set(nodepriv->flags, pcmk__xf_created)) { const char *attr_name = (const char *) new_attr->name; crm_trace("Created new attribute %s=%s in %s", attr_name, pcmk__xml_attr_value(new_attr), new_xml->name); /* Check ACLs (we can't use the remove-then-create trick because it * would modify the attribute position). */ if (pcmk__check_acl(new_xml, attr_name, pcmk__xf_acl_write)) { pcmk__mark_xml_attr_dirty(new_attr); } else { // Creation was not allowed, so remove the attribute xmlUnsetProp(new_xml, new_attr->name); } } } } /*! * \internal * \brief Calculate differences in attributes between two XML nodes * * \param[in,out] old_xml Original XML to compare * \param[in,out] new_xml New XML to compare */ static void xml_diff_attrs(xmlNode *old_xml, xmlNode *new_xml) { set_attrs_flag(new_xml, pcmk__xf_created); // cleared later if not really new xml_diff_old_attrs(old_xml, new_xml); mark_created_attrs(new_xml); } /*! * \internal * \brief Add an XML child element to a node, marked as deleted * * When calculating XML changes, we need to know when a child element has been * deleted. Add the child back to the new XML, so that we can check the removal * against ACLs, and mark it as deleted for later removal after differences have * been calculated. 
* * \param[in,out] old_child Child element from original XML * \param[in,out] new_parent New XML to add marked copy to */ static void mark_child_deleted(xmlNode *old_child, xmlNode *new_parent) { // Re-create the child element so we can check ACLs xmlNode *candidate = pcmk__xml_copy(new_parent, old_child); // Clear flags on new child and its children reset_xml_node_flags(candidate); // Check whether ACLs allow the deletion pcmk__apply_acl(xmlDocGetRootElement(candidate->doc)); // Remove the child again (which will track it in document's deleted_objs) free_xml_with_position(candidate, pcmk__xml_position(old_child, pcmk__xf_skip)); if (pcmk__xml_match(new_parent, old_child, true) == NULL) { pcmk__set_xml_flags((xml_node_private_t *) (old_child->_private), pcmk__xf_skip); } } static void mark_child_moved(xmlNode *old_child, xmlNode *new_parent, xmlNode *new_child, int p_old, int p_new) { xml_node_private_t *nodepriv = new_child->_private; - crm_trace("Child element %s with id='%s' moved from position %d to %d under %s", + crm_trace("Child element %s with " + PCMK_XA_ID "='%s' moved from position %d to %d under %s", new_child->name, pcmk__s(pcmk__xe_id(new_child), ""), p_old, p_new, new_parent->name); pcmk__mark_xml_node_dirty(new_parent); pcmk__set_xml_flags(nodepriv, pcmk__xf_moved); if (p_old > p_new) { nodepriv = old_child->_private; } else { nodepriv = new_child->_private; } pcmk__set_xml_flags(nodepriv, pcmk__xf_skip); } // Given original and new XML, mark new XML portions that have changed static void mark_xml_changes(xmlNode *old_xml, xmlNode *new_xml, bool check_top) { xmlNode *cIter = NULL; xml_node_private_t *nodepriv = NULL; CRM_CHECK(new_xml != NULL, return); if (old_xml == NULL) { pcmk__mark_xml_created(new_xml); pcmk__apply_creation_acl(new_xml, check_top); return; } nodepriv = new_xml->_private; CRM_CHECK(nodepriv != NULL, return); if(nodepriv->flags & pcmk__xf_processed) { /* Avoid re-comparing nodes */ return; } pcmk__set_xml_flags(nodepriv, pcmk__xf_processed); xml_diff_attrs(old_xml, new_xml); // Check for differences in the original children for (cIter = pcmk__xml_first_child(old_xml); cIter != NULL; ) { xmlNode *old_child = cIter; xmlNode *new_child = pcmk__xml_match(new_xml, cIter, true); cIter = pcmk__xml_next(cIter); if(new_child) { mark_xml_changes(old_child, new_child, TRUE); } else { mark_child_deleted(old_child, new_xml); } } // Check for moved or created children for (cIter = pcmk__xml_first_child(new_xml); cIter != NULL; ) { xmlNode *new_child = cIter; xmlNode *old_child = pcmk__xml_match(old_xml, cIter, true); cIter = pcmk__xml_next(cIter); if(old_child == NULL) { // This is a newly created child nodepriv = new_child->_private; pcmk__set_xml_flags(nodepriv, pcmk__xf_skip); mark_xml_changes(old_child, new_child, TRUE); } else { /* Check for movement, we already checked for differences */ int p_new = pcmk__xml_position(new_child, pcmk__xf_skip); int p_old = pcmk__xml_position(old_child, pcmk__xf_skip); if(p_old != p_new) { mark_child_moved(old_child, new_xml, new_child, p_old, p_new); } } } } void xml_calculate_significant_changes(xmlNode *old_xml, xmlNode *new_xml) { pcmk__set_xml_doc_flag(new_xml, pcmk__xf_lazy); xml_calculate_changes(old_xml, new_xml); } // Called functions may set the \p pcmk__xf_skip flag on parts of \p old_xml void xml_calculate_changes(xmlNode *old_xml, xmlNode *new_xml) { CRM_CHECK((old_xml != NULL) && (new_xml != NULL) && pcmk__xe_is(old_xml, (const char *) new_xml->name) && pcmk__str_eq(pcmk__xe_id(old_xml), pcmk__xe_id(new_xml), 
pcmk__str_none), return); if(xml_tracking_changes(new_xml) == FALSE) { xml_track_changes(new_xml, NULL, NULL, FALSE); } mark_xml_changes(old_xml, new_xml, FALSE); } /*! * \internal * \brief Find a comment with matching content in specified XML * * \param[in] root XML to search * \param[in] search_comment Comment whose content should be searched for * \param[in] exact If true, comment must also be at same position */ xmlNode * pcmk__xc_match(const xmlNode *root, const xmlNode *search_comment, bool exact) { xmlNode *a_child = NULL; int search_offset = pcmk__xml_position(search_comment, pcmk__xf_skip); CRM_CHECK(search_comment->type == XML_COMMENT_NODE, return NULL); for (a_child = pcmk__xml_first_child(root); a_child != NULL; a_child = pcmk__xml_next(a_child)) { if (exact) { int offset = pcmk__xml_position(a_child, pcmk__xf_skip); xml_node_private_t *nodepriv = a_child->_private; if (offset < search_offset) { continue; } else if (offset > search_offset) { return NULL; } if (pcmk_is_set(nodepriv->flags, pcmk__xf_skip)) { continue; } } if (a_child->type == XML_COMMENT_NODE && pcmk__str_eq((const char *)a_child->content, (const char *)search_comment->content, pcmk__str_casei)) { return a_child; } else if (exact) { return NULL; } } return NULL; } /*! * \internal * \brief Make one XML comment match another (in content) * * \param[in,out] parent If \p target is NULL and this is not, add or update * comment child of this XML node that matches \p update * \param[in,out] target If not NULL, update this XML comment node * \param[in] update Make comment content match this (must not be NULL) * * \note At least one of \parent and \target must be non-NULL */ void pcmk__xc_update(xmlNode *parent, xmlNode *target, xmlNode *update) { CRM_CHECK(update != NULL, return); CRM_CHECK(update->type == XML_COMMENT_NODE, return); if (target == NULL) { target = pcmk__xc_match(parent, update, false); } if (target == NULL) { pcmk__xml_copy(parent, update); } else if (!pcmk__str_eq((const char *)target->content, (const char *)update->content, pcmk__str_casei)) { xmlFree(target->content); target->content = xmlStrdup(update->content); } } /*! * \internal * \brief Make one XML tree match another (in children and attributes) * * \param[in,out] parent If \p target is NULL and this is not, add or update * child of this XML node that matches \p update * \param[in,out] target If not NULL, update this XML * \param[in] update Make the desired XML match this (must not be NULL) * \param[in] as_diff If false, expand "++" when making attributes match * * \note At least one of \p parent and \p target must be non-NULL */ void pcmk__xml_update(xmlNode *parent, xmlNode *target, xmlNode *update, bool as_diff) { xmlNode *a_child = NULL; const char *object_name = NULL, *object_href = NULL, *object_href_val = NULL; #if XML_PARSER_DEBUG crm_log_xml_trace(update, "update:"); crm_log_xml_trace(target, "target:"); #endif CRM_CHECK(update != NULL, return); if (update->type == XML_COMMENT_NODE) { pcmk__xc_update(parent, target, update); return; } object_name = (const char *) update->name; object_href_val = pcmk__xe_id(update); if (object_href_val != NULL) { object_href = PCMK_XA_ID; } else { object_href_val = crm_element_value(update, PCMK_XA_ID_REF); object_href = (object_href_val == NULL)? 
NULL : PCMK_XA_ID_REF; } CRM_CHECK(object_name != NULL, return); CRM_CHECK(target != NULL || parent != NULL, return); if (target == NULL) { target = pcmk__xe_match(parent, object_name, object_href, object_href_val); } if (target == NULL) { target = create_xml_node(parent, object_name); CRM_CHECK(target != NULL, return); #if XML_PARSER_DEBUG crm_trace("Added <%s%s%s%s%s/>", pcmk__s(object_name, ""), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? object_href_val : ""); } else { crm_trace("Found node <%s%s%s%s%s/> to update", pcmk__s(object_name, ""), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? object_href_val : ""); #endif } CRM_CHECK(pcmk__xe_is(target, (const char *) update->name), return); if (as_diff == FALSE) { /* So that expand_plus_plus() gets called */ copy_in_properties(target, update); } else { /* No need for expand_plus_plus(), just raw speed */ for (xmlAttrPtr a = pcmk__xe_first_attr(update); a != NULL; a = a->next) { const char *p_value = pcmk__xml_attr_value(a); /* Remove it first so the ordering of the update is preserved */ xmlUnsetProp(target, a->name); xmlSetProp(target, a->name, (pcmkXmlStr) p_value); } } for (a_child = pcmk__xml_first_child(update); a_child != NULL; a_child = pcmk__xml_next(a_child)) { #if XML_PARSER_DEBUG crm_trace("Updating child <%s%s%s%s%s/>", pcmk__s(object_name, ""), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? object_href_val : ""); #endif pcmk__xml_update(target, NULL, a_child, as_diff); } #if XML_PARSER_DEBUG crm_trace("Finished with <%s%s%s%s%s/>", pcmk__s(object_name, ""), object_href ? " " : "", object_href ? object_href : "", object_href ? "=" : "", object_href ? 
object_href_val : ""); #endif } gboolean update_xml_child(xmlNode * child, xmlNode * to_update) { gboolean can_update = TRUE; xmlNode *child_of_child = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(to_update != NULL, return FALSE); if (!pcmk__xe_is(to_update, (const char *) child->name)) { can_update = FALSE; } else if (!pcmk__str_eq(pcmk__xe_id(to_update), pcmk__xe_id(child), pcmk__str_none)) { can_update = FALSE; } else if (can_update) { #if XML_PARSER_DEBUG crm_log_xml_trace(child, "Update match found..."); #endif pcmk__xml_update(NULL, child, to_update, false); } for (child_of_child = pcmk__xml_first_child(child); child_of_child != NULL; child_of_child = pcmk__xml_next(child_of_child)) { /* only update the first one */ if (can_update) { break; } can_update = update_xml_child(child_of_child, to_update); } return can_update; } int find_xml_children(xmlNode ** children, xmlNode * root, const char *tag, const char *field, const char *value, gboolean search_matches) { int match_found = 0; CRM_CHECK(root != NULL, return FALSE); CRM_CHECK(children != NULL, return FALSE); if ((tag != NULL) && !pcmk__xe_is(root, tag)) { } else if (value != NULL && !pcmk__str_eq(value, crm_element_value(root, field), pcmk__str_casei)) { } else { if (*children == NULL) { *children = create_xml_node(NULL, __func__); } pcmk__xml_copy(*children, root); match_found = 1; } if (search_matches || match_found == 0) { xmlNode *child = NULL; for (child = pcmk__xml_first_child(root); child != NULL; child = pcmk__xml_next(child)) { match_found += find_xml_children(children, child, tag, field, value, search_matches); } } return match_found; } gboolean replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean delete_only) { gboolean can_delete = FALSE; xmlNode *child_of_child = NULL; const char *up_id = NULL; const char *child_id = NULL; const char *right_val = NULL; CRM_CHECK(child != NULL, return FALSE); CRM_CHECK(update != NULL, return FALSE); up_id = pcmk__xe_id(update); child_id = pcmk__xe_id(child); if (up_id == NULL || (child_id && strcmp(child_id, up_id) == 0)) { can_delete = TRUE; } if (!pcmk__xe_is(update, (const char *) child->name)) { can_delete = FALSE; } if (can_delete && delete_only) { for (xmlAttrPtr a = pcmk__xe_first_attr(update); a != NULL; a = a->next) { const char *p_name = (const char *) a->name; const char *p_value = pcmk__xml_attr_value(a); right_val = crm_element_value(child, p_name); if (!pcmk__str_eq(p_value, right_val, pcmk__str_casei)) { can_delete = FALSE; } } } if (can_delete && parent != NULL) { crm_log_xml_trace(child, "Delete match found..."); if (delete_only || update == NULL) { free_xml(child); } else { xmlNode *old = child; xmlNode *new = xmlCopyNode(update, 1); CRM_ASSERT(new != NULL); // May be unnecessary but avoids slight changes to some test outputs reset_xml_node_flags(new); old = xmlReplaceNode(old, new); if (xml_tracking_changes(new)) { // Replaced sections may have included relevant ACLs pcmk__apply_acl(new); } xml_calculate_changes(old, new); xmlFreeNode(old); } return TRUE; } else if (can_delete) { crm_log_xml_debug(child, "Cannot delete the search root"); can_delete = FALSE; } child_of_child = pcmk__xml_first_child(child); while (child_of_child) { xmlNode *next = pcmk__xml_next(child_of_child); can_delete = replace_xml_child(child, child_of_child, update, delete_only); /* only delete the first one */ if (can_delete) { child_of_child = NULL; } else { child_of_child = next; } } return can_delete; } xmlNode * sorted_xml(xmlNode *input, xmlNode 
*parent, gboolean recursive) { xmlNode *child = NULL; GSList *nvpairs = NULL; xmlNode *result = NULL; CRM_CHECK(input != NULL, return NULL); result = create_xml_node(parent, (const char *) input->name); nvpairs = pcmk_xml_attrs2nvpairs(input); nvpairs = pcmk_sort_nvpairs(nvpairs); pcmk_nvpairs2xml_attrs(nvpairs, result); pcmk_free_nvpairs(nvpairs); for (child = pcmk__xe_first_child(input); child != NULL; child = pcmk__xe_next(child)) { if (recursive) { sorted_xml(child, result, recursive); } else { pcmk__xml_copy(result, child); } } return result; } xmlNode * first_named_child(const xmlNode *parent, const char *name) { xmlNode *match = NULL; for (match = pcmk__xe_first_child(parent); match != NULL; match = pcmk__xe_next(match)) { /* * name == NULL gives first child regardless of name; this is * semantically incorrect in this function, but may be necessary * due to prior use of xml_child_iter_filter */ if ((name == NULL) || pcmk__xe_is(match, name)) { return match; } } return NULL; } /*! * \brief Get next instance of same XML tag * * \param[in] sibling XML tag to start from * * \return Next sibling XML tag with same name */ xmlNode * crm_next_same_xml(const xmlNode *sibling) { xmlNode *match = pcmk__xe_next(sibling); while (match != NULL) { if (pcmk__xe_is(match, (const char *) sibling->name)) { return match; } match = pcmk__xe_next(match); } return NULL; } void crm_xml_init(void) { static bool init = true; if(init) { init = false; /* The default allocator XML_BUFFER_ALLOC_EXACT does far too many * pcmk__realloc()s and it can take upwards of 18 seconds (yes, seconds) * to dump a 28kb tree which XML_BUFFER_ALLOC_DOUBLEIT can do in * less than 1 second. */ xmlSetBufferAllocationScheme(XML_BUFFER_ALLOC_DOUBLEIT); /* Populate and free the _private field when nodes are created and destroyed */ xmlDeregisterNodeDefault(free_private_data); xmlRegisterNodeDefault(new_private_data); crm_schema_init(); } } void crm_xml_cleanup(void) { crm_schema_cleanup(); xmlCleanupParser(); } #define XPATH_MAX 512 xmlNode * expand_idref(xmlNode * input, xmlNode * top) { char *xpath = NULL; const char *ref = NULL; xmlNode *result = NULL; if (input == NULL) { return NULL; } ref = crm_element_value(input, PCMK_XA_ID_REF); if (ref == NULL) { return input; } if (top == NULL) { top = input; } xpath = crm_strdup_printf("//%s[@" PCMK_XA_ID "='%s']", input->name, ref); result = get_xpath_object(xpath, top, LOG_DEBUG); if (result == NULL) { // Not possible with schema validation enabled pcmk__config_err("Ignoring invalid %s configuration: " PCMK_XA_ID_REF " '%s' does not reference " "a valid object " CRM_XS " xpath=%s", input->name, ref, xpath); } free(xpath); return result; } char * pcmk__xml_artefact_root(enum pcmk__xml_artefact_ns ns) { static const char *base = NULL; char *ret = NULL; if (base == NULL) { base = pcmk__env_option(PCMK__ENV_SCHEMA_DIRECTORY); } if (pcmk__str_empty(base)) { base = CRM_SCHEMA_DIRECTORY; } switch (ns) { case pcmk__xml_artefact_ns_legacy_rng: case pcmk__xml_artefact_ns_legacy_xslt: ret = strdup(base); break; case pcmk__xml_artefact_ns_base_rng: case pcmk__xml_artefact_ns_base_xslt: ret = crm_strdup_printf("%s/base", base); break; default: crm_err("XML artefact family specified as %u not recognized", ns); } return ret; } static char * find_artefact(enum pcmk__xml_artefact_ns ns, const char *path, const char *filespec) { char *ret = NULL; switch (ns) { case pcmk__xml_artefact_ns_legacy_rng: case pcmk__xml_artefact_ns_base_rng: if (pcmk__ends_with(filespec, ".rng")) { ret = 
crm_strdup_printf("%s/%s", path, filespec); } else { ret = crm_strdup_printf("%s/%s.rng", path, filespec); } break; case pcmk__xml_artefact_ns_legacy_xslt: case pcmk__xml_artefact_ns_base_xslt: if (pcmk__ends_with(filespec, ".xsl")) { ret = crm_strdup_printf("%s/%s", path, filespec); } else { ret = crm_strdup_printf("%s/%s.xsl", path, filespec); } break; default: crm_err("XML artefact family specified as %u not recognized", ns); } return ret; } char * pcmk__xml_artefact_path(enum pcmk__xml_artefact_ns ns, const char *filespec) { struct stat sb; char *base = pcmk__xml_artefact_root(ns); char *ret = NULL; ret = find_artefact(ns, base, filespec); free(base); if (stat(ret, &sb) != 0 || !S_ISREG(sb.st_mode)) { const char *remote_schema_dir = pcmk__remote_schema_dir(); ret = find_artefact(ns, remote_schema_dir, filespec); } return ret; } void pcmk__xe_set_propv(xmlNodePtr node, va_list pairs) { while (true) { const char *name, *value; name = va_arg(pairs, const char *); if (name == NULL) { return; } value = va_arg(pairs, const char *); if (value != NULL) { crm_xml_add(node, name, value); } } } void pcmk__xe_set_props(xmlNodePtr node, ...) { va_list pairs; va_start(pairs, node); pcmk__xe_set_propv(node, pairs); va_end(pairs); } int pcmk__xe_foreach_child(xmlNode *xml, const char *child_element_name, int (*handler)(xmlNode *xml, void *userdata), void *userdata) { xmlNode *children = (xml? xml->children : NULL); CRM_ASSERT(handler != NULL); for (xmlNode *node = children; node != NULL; node = node->next) { if ((node->type == XML_ELEMENT_NODE) && ((child_element_name == NULL) || pcmk__xe_is(node, child_element_name))) { int rc = handler(node, userdata); if (rc != pcmk_rc_ok) { return rc; } } } return pcmk_rc_ok; } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include xmlNode * find_entity(xmlNode *parent, const char *node_name, const char *id) { return pcmk__xe_match(parent, node_name, ((id == NULL)? 
id : PCMK_XA_ID), id); } void crm_destroy_xml(gpointer data) { free_xml(data); } xmlDoc * getDocPtr(xmlNode *node) { xmlDoc *doc = NULL; CRM_CHECK(node != NULL, return NULL); doc = node->doc; if (doc == NULL) { doc = xmlNewDoc(PCMK__XML_VERSION); xmlDocSetRootElement(doc, node); } return doc; } xmlNode * add_node_copy(xmlNode *parent, xmlNode *src_node) { xmlNode *child = NULL; CRM_CHECK((parent != NULL) && (src_node != NULL), return NULL); child = xmlDocCopyNode(src_node, parent->doc, 1); if (child == NULL) { return NULL; } xmlAddChild(parent, child); pcmk__mark_xml_created(child); return child; } int add_node_nocopy(xmlNode *parent, const char *name, xmlNode *child) { add_node_copy(parent, child); free_xml(child); return 1; } gboolean xml_has_children(const xmlNode * xml_root) { if (xml_root != NULL && xml_root->children != NULL) { return TRUE; } return FALSE; } char * crm_xml_escape(const char *text) { size_t length = 0; char *copy = NULL; if (text == NULL) { return NULL; } length = strlen(text); copy = strdup(text); CRM_ASSERT(copy != NULL); for (size_t index = 0; index <= length; index++) { if(copy[index] & 0x80 && copy[index+1] & 0x80){ index++; continue; } switch (copy[index]) { case 0: // Sanity only; loop should stop at the last non-null byte break; case '<': copy = replace_text(copy, &index, &length, "<"); break; case '>': copy = replace_text(copy, &index, &length, ">"); break; case '"': copy = replace_text(copy, &index, &length, """); break; case '\'': copy = replace_text(copy, &index, &length, "'"); break; case '&': copy = replace_text(copy, &index, &length, "&"); break; case '\t': /* Might as well just expand to a few spaces... */ copy = replace_text(copy, &index, &length, " "); break; case '\n': copy = replace_text(copy, &index, &length, "\\n"); break; case '\r': copy = replace_text(copy, &index, &length, "\\r"); break; default: /* Check for and replace non-printing characters with their octal equivalent */ if(copy[index] < ' ' || copy[index] > '~') { char *replace = crm_strdup_printf("\\%.3o", copy[index]); copy = replace_text(copy, &index, &length, replace); free(replace); } } } return copy; } xmlNode * copy_xml(xmlNode *src) { xmlDoc *doc = xmlNewDoc(PCMK__XML_VERSION); xmlNode *copy = xmlDocCopyNode(src, doc, 1); CRM_ASSERT(copy != NULL); xmlDocSetRootElement(doc, copy); return copy; } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/pacemaker/pcmk_graph_consumer.c b/lib/pacemaker/pcmk_graph_consumer.c index 3aef237c2b..f3391589e8 100644 --- a/lib/pacemaker/pcmk_graph_consumer.c +++ b/lib/pacemaker/pcmk_graph_consumer.c @@ -1,882 +1,883 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include /* * Functions for updating graph */ /*! * \internal * \brief Update synapse after completed prerequisite * * A synapse is ready to be executed once all its prerequisite actions (inputs) * complete. Given a completed action, check whether it is an input for a given * synapse, and if so, mark the input as confirmed, and mark the synapse as * ready if appropriate. * * \param[in,out] synapse Transition graph synapse to update * \param[in] action_id ID of an action that completed * * \note The only substantial effect here is confirming synapse inputs. 
* should_fire_synapse() will recalculate pcmk__synapse_ready, so the only * thing that uses the pcmk__synapse_ready from here is * synapse_state_str(). */ static void update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id) { if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) { return; // All inputs have already been confirmed } // Presume ready until proven otherwise pcmk__set_synapse_flags(synapse, pcmk__synapse_ready); for (GList *lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data; if (prereq->id == action_id) { crm_trace("Confirming input %d of synapse %d", action_id, synapse->id); pcmk__set_graph_action_flags(prereq, pcmk__graph_action_confirmed); } else if (!pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed)) { pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready); crm_trace("Synapse %d still not ready after action %d", synapse->id, action_id); } } if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) { crm_trace("Synapse %d is now ready to execute", synapse->id); } } /*! * \internal * \brief Update action and synapse confirmation after action completion * * \param[in,out] synapse Transition graph synapse that action belongs to * \param[in] action_id ID of action that completed */ static void update_synapse_confirmed(pcmk__graph_synapse_t *synapse, int action_id) { bool all_confirmed = true; for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data; if (action->id == action_id) { crm_trace("Confirmed action %d of synapse %d", action_id, synapse->id); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); } else if (all_confirmed && !pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { all_confirmed = false; crm_trace("Synapse %d still not confirmed after action %d", synapse->id, action_id); } } if (all_confirmed && !pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) { crm_trace("Confirmed synapse %d", synapse->id); pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed); } } /*! * \internal * \brief Update the transition graph with a completed action result * * \param[in,out] graph Transition graph to update * \param[in] action Action that completed */ void pcmk__update_graph(pcmk__graph_t *graph, const pcmk__graph_action_t *action) { for (GList *lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data; if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed|pcmk__synapse_failed)) { continue; // This synapse already completed } else if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) { update_synapse_confirmed(synapse, action->id); } else if (!pcmk_is_set(action->flags, pcmk__graph_action_failed) || (synapse->priority == PCMK_SCORE_INFINITY)) { update_synapse_ready(synapse, action->id); } } } /* * Functions for executing graph */ /* A transition graph consists of various types of actions. The library caller * registers execution functions for each action type, which will be stored * here. */ static pcmk__graph_functions_t *graph_fns = NULL; /*! 
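 * Callers normally register their own handlers before executing a graph. A
 * minimal sketch (the handler names are hypothetical; designated initializers
 * are used so the example does not depend on the field order of
 * pcmk__graph_functions_t, and the optional .allowed member is left unset):
 *
 *     static pcmk__graph_functions_t my_graph_fns = {
 *         .pseudo  = my_pseudo_handler,
 *         .rsc     = my_rsc_handler,
 *         .cluster = my_cluster_handler,
 *         .fence   = my_fence_handler,
 *     };
 *
 *     pcmk__set_graph_functions(&my_graph_fns);
 *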
* \internal * \brief Set transition graph execution functions * * \param[in] Execution functions to use */ void pcmk__set_graph_functions(pcmk__graph_functions_t *fns) { crm_debug("Setting custom functions for executing transition graphs"); graph_fns = fns; CRM_ASSERT(graph_fns != NULL); CRM_ASSERT(graph_fns->rsc != NULL); CRM_ASSERT(graph_fns->cluster != NULL); CRM_ASSERT(graph_fns->pseudo != NULL); CRM_ASSERT(graph_fns->fence != NULL); } /*! * \internal * \brief Check whether a graph synapse is ready to be executed * * \param[in,out] graph Transition graph that synapse is part of * \param[in,out] synapse Synapse to check * * \return true if synapse is ready, false otherwise */ static bool should_fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse) { GList *lpc = NULL; pcmk__set_synapse_flags(synapse, pcmk__synapse_ready); for (lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data; if (!(pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed))) { crm_trace("Input %d for synapse %d not yet confirmed", prereq->id, synapse->id); pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready); break; } else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed) && !pcmk_is_set(prereq->flags, pcmk__graph_action_can_fail)) { crm_trace("Input %d for synapse %d confirmed but failed", prereq->id, synapse->id); pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready); break; } } if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) { crm_trace("Synapse %d is ready to execute", synapse->id); } else { return false; } for (lpc = synapse->actions; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *a = (pcmk__graph_action_t *) lpc->data; if (a->type == pcmk__pseudo_graph_action) { /* None of the below applies to pseudo ops */ } else if (synapse->priority < graph->abort_priority) { crm_trace("Skipping synapse %d: priority %d is less than " "abort priority %d", synapse->id, synapse->priority, graph->abort_priority); graph->skipped++; return false; } else if (graph_fns->allowed && !(graph_fns->allowed(graph, a))) { crm_trace("Deferring synapse %d: not allowed", synapse->id); return false; } } return true; } /*! * \internal * \brief Initiate an action from a transition graph * * \param[in,out] graph Transition graph containing action * \param[in,out] action Action to execute * * \return Standard Pacemaker return code */ static int initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *id = pcmk__xe_id(action->xml); CRM_CHECK(id != NULL, return EINVAL); CRM_CHECK(!pcmk_is_set(action->flags, pcmk__graph_action_executed), return pcmk_rc_already); pcmk__set_graph_action_flags(action, pcmk__graph_action_executed); switch (action->type) { case pcmk__pseudo_graph_action: crm_trace("Executing pseudo-action %d (%s)", action->id, id); return graph_fns->pseudo(graph, action); case pcmk__rsc_graph_action: crm_trace("Executing resource action %d (%s)", action->id, id); return graph_fns->rsc(graph, action); case pcmk__cluster_graph_action: if (pcmk__str_eq(crm_element_value(action->xml, PCMK_XA_OPERATION), PCMK_ACTION_STONITH, pcmk__str_none)) { crm_trace("Executing fencing action %d (%s)", action->id, id); return graph_fns->fence(graph, action); } crm_trace("Executing cluster action %d (%s)", action->id, id); return graph_fns->cluster(graph, action); default: crm_err("Unsupported graph action type <%s " PCMK_XA_ID "='%s'> " "(bug?)", action->xml->name, id); return EINVAL; } } /*! 
* \internal * \brief Execute a graph synapse * * \param[in,out] graph Transition graph with synapse to execute * \param[in,out] synapse Synapse to execute * * \return Standard Pacemaker return value */ static int fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse) { pcmk__set_synapse_flags(synapse, pcmk__synapse_executed); for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data; int rc = initiate_action(graph, action); if (rc != pcmk_rc_ok) { crm_err("Failed initiating <%s " PCMK_XA_ID "=%d> in synapse %d: " "%s", action->xml->name, action->id, synapse->id, pcmk_rc_str(rc)); pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed |pcmk__graph_action_failed); return pcmk_rc_error; } } return pcmk_rc_ok; } /*! * \internal * \brief Dummy graph method that can be used with simulations * * \param[in,out] graph Transition graph containing action * \param[in,out] action Graph action to be initiated * * \return Standard Pacemaker return code * \note If the PE_fail environment variable is set to the action ID, * then the graph action will be marked as failed. */ static int pseudo_action_dummy(pcmk__graph_t *graph, pcmk__graph_action_t *action) { static int fail = -1; if (fail < 0) { long long fail_ll; if ((pcmk__scan_ll(getenv("PE_fail"), &fail_ll, 0LL) == pcmk_rc_ok) && (fail_ll > 0LL) && (fail_ll <= INT_MAX)) { fail = (int) fail_ll; } else { fail = 0; } } if (action->id == fail) { crm_err("Dummy event handler: pretending action %d failed", action->id); pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); graph->abort_priority = PCMK_SCORE_INFINITY; } else { crm_trace("Dummy event handler: action %d initiated", action->id); } pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); pcmk__update_graph(graph, action); return pcmk_rc_ok; } static pcmk__graph_functions_t default_fns = { pseudo_action_dummy, pseudo_action_dummy, pseudo_action_dummy, pseudo_action_dummy }; /*! 
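 * A worked illustration of the return values (counts are hypothetical): with
 * batch_limit=2 and two synapses already executed but not yet confirmed,
 * pending starts at 2, the batch limit prevents any further synapse from
 * firing, and the function returns pcmk__graph_pending. Once pending and
 * fired both reach 0, the graph is marked complete and the function returns
 * pcmk__graph_complete, or pcmk__graph_terminated if some synapse could not
 * fire and no abort is in effect.
 *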
* \internal * \brief Execute all actions in a transition graph * * \param[in,out] graph Transition graph to execute * * \return Status of transition after execution */ enum pcmk__graph_status pcmk__execute_graph(pcmk__graph_t *graph) { GList *lpc = NULL; int log_level = LOG_DEBUG; enum pcmk__graph_status pass_result = pcmk__graph_active; const char *status = "In progress"; if (graph_fns == NULL) { graph_fns = &default_fns; } if (graph == NULL) { return pcmk__graph_complete; } graph->fired = 0; graph->pending = 0; graph->skipped = 0; graph->completed = 0; graph->incomplete = 0; // Count completed and in-flight synapses for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data; if (pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) { graph->completed++; } else if (!pcmk_is_set(synapse->flags, pcmk__synapse_failed) && pcmk_is_set(synapse->flags, pcmk__synapse_executed)) { graph->pending++; } } crm_trace("Executing graph %d (%d synapses already completed, %d pending)", graph->id, graph->completed, graph->pending); // Execute any synapses that are ready for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data; if ((graph->batch_limit > 0) && (graph->pending >= graph->batch_limit)) { crm_debug("Throttling graph execution: batch limit (%d) reached", graph->batch_limit); break; } else if (pcmk_is_set(synapse->flags, pcmk__synapse_failed)) { graph->skipped++; continue; } else if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed |pcmk__synapse_executed)) { continue; // Already handled } else if (should_fire_synapse(graph, synapse)) { graph->fired++; if (fire_synapse(graph, synapse) != pcmk_rc_ok) { crm_err("Synapse %d failed to fire", synapse->id); log_level = LOG_ERR; graph->abort_priority = PCMK_SCORE_INFINITY; graph->incomplete++; graph->fired--; } if (!(pcmk_is_set(synapse->flags, pcmk__synapse_confirmed))) { graph->pending++; } } else { crm_trace("Synapse %d cannot fire", synapse->id); graph->incomplete++; } } if ((graph->pending == 0) && (graph->fired == 0)) { graph->complete = true; if ((graph->incomplete != 0) && (graph->abort_priority <= 0)) { log_level = LOG_WARNING; pass_result = pcmk__graph_terminated; status = "Terminated"; } else if (graph->skipped != 0) { log_level = LOG_NOTICE; pass_result = pcmk__graph_complete; status = "Stopped"; } else { log_level = LOG_NOTICE; pass_result = pcmk__graph_complete; status = "Complete"; } } else if (graph->fired == 0) { pass_result = pcmk__graph_pending; } do_crm_log(log_level, "Transition %d (Complete=%d, Pending=%d," " Fired=%d, Skipped=%d, Incomplete=%d, Source=%s): %s", graph->id, graph->completed, graph->pending, graph->fired, graph->skipped, graph->incomplete, graph->source, status); return pass_result; } /* * Functions for unpacking transition graph XML into structs */ /*! 
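 * A worked example of the timeout handling below (values hypothetical): an
 * action carrying CRM_meta_timeout=20000 and CRM_meta_start_delay=5000 ends
 * up with action->timeout == 25000, because the start delay is added to the
 * timeout used for the action timer.
 *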
* \internal * \brief Unpack a transition graph action from XML * * \param[in] parent Synapse that action is part of * \param[in] xml_action Action XML to unparse * * \return Newly allocated action on success, or NULL otherwise */ static pcmk__graph_action_t * unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action) { enum pcmk__graph_action_type action_type; pcmk__graph_action_t *action = NULL; const char *value = pcmk__xe_id(xml_action); if (value == NULL) { - crm_err("Ignoring transition graph action without id (bug?)"); + crm_err("Ignoring transition graph action without " PCMK_XA_ID + " (bug?)"); crm_log_xml_trace(xml_action, "invalid"); return NULL; } if (pcmk__xe_is(xml_action, PCMK__XE_RSC_OP)) { action_type = pcmk__rsc_graph_action; } else if (pcmk__xe_is(xml_action, PCMK__XE_PSEUDO_EVENT)) { action_type = pcmk__pseudo_graph_action; } else if (pcmk__xe_is(xml_action, PCMK__XE_CRM_EVENT)) { action_type = pcmk__cluster_graph_action; } else { crm_err("Ignoring transition graph action of unknown type '%s' (bug?)", xml_action->name); crm_log_xml_trace(xml_action, "invalid"); return NULL; } action = calloc(1, sizeof(pcmk__graph_action_t)); if (action == NULL) { crm_perror(LOG_CRIT, "Cannot unpack transition graph action"); crm_log_xml_trace(xml_action, "lost"); return NULL; } pcmk__scan_min_int(value, &(action->id), -1); action->type = pcmk__rsc_graph_action; action->xml = pcmk__xml_copy(NULL, xml_action); action->synapse = parent; action->type = action_type; action->params = xml2list(action->xml); value = crm_meta_value(action->params, PCMK_META_TIMEOUT); pcmk__scan_min_int(value, &(action->timeout), 0); /* Take PCMK_META_START_DELAY into account for the timeout of the action * timer */ value = crm_meta_value(action->params, PCMK_META_START_DELAY); { int start_delay; pcmk__scan_min_int(value, &start_delay, 0); action->timeout += start_delay; } if (pcmk__guint_from_hash(action->params, CRM_META "_" PCMK_META_INTERVAL, 0, &(action->interval_ms)) != pcmk_rc_ok) { action->interval_ms = 0; } value = crm_meta_value(action->params, PCMK__META_CAN_FAIL); if (value != NULL) { int can_fail = 0; if ((crm_str_to_boolean(value, &can_fail) > 0) && (can_fail > 0)) { pcmk__set_graph_action_flags(action, pcmk__graph_action_can_fail); } else { pcmk__clear_graph_action_flags(action, pcmk__graph_action_can_fail); } #ifndef PCMK__COMPAT_2_0 if (pcmk_is_set(action->flags, pcmk__graph_action_can_fail)) { crm_warn("Support for the " PCMK__META_CAN_FAIL " meta-attribute " "is deprecated and will be removed in a future release"); } #endif } crm_trace("Action %d has timer set to %dms", action->id, action->timeout); return action; } /*! 
* \internal * \brief Unpack transition graph synapse from XML * * \param[in,out] new_graph Transition graph that synapse is part of * \param[in] xml_synapse Synapse XML * * \return Newly allocated synapse on success, or NULL otherwise */ static pcmk__graph_synapse_t * unpack_synapse(pcmk__graph_t *new_graph, const xmlNode *xml_synapse) { const char *value = NULL; xmlNode *action_set = NULL; pcmk__graph_synapse_t *new_synapse = NULL; crm_trace("Unpacking synapse %s", pcmk__xe_id(xml_synapse)); new_synapse = calloc(1, sizeof(pcmk__graph_synapse_t)); if (new_synapse == NULL) { return NULL; } pcmk__scan_min_int(pcmk__xe_id(xml_synapse), &(new_synapse->id), 0); value = crm_element_value(xml_synapse, PCMK__XA_PRIORITY); pcmk__scan_min_int(value, &(new_synapse->priority), 0); CRM_CHECK(new_synapse->id >= 0, free(new_synapse); return NULL); new_graph->num_synapses++; crm_trace("Unpacking synapse %s action sets", crm_element_value(xml_synapse, PCMK_XA_ID)); for (action_set = first_named_child(xml_synapse, "action_set"); action_set != NULL; action_set = crm_next_same_xml(action_set)) { for (xmlNode *action = pcmk__xe_first_child(action_set); action != NULL; action = pcmk__xe_next(action)) { pcmk__graph_action_t *new_action = unpack_action(new_synapse, action); if (new_action == NULL) { continue; } crm_trace("Adding action %d to synapse %d", new_action->id, new_synapse->id); new_graph->num_actions++; new_synapse->actions = g_list_append(new_synapse->actions, new_action); } } crm_trace("Unpacking synapse %s inputs", pcmk__xe_id(xml_synapse)); for (xmlNode *inputs = first_named_child(xml_synapse, "inputs"); inputs != NULL; inputs = crm_next_same_xml(inputs)) { for (xmlNode *trigger = first_named_child(inputs, "trigger"); trigger != NULL; trigger = crm_next_same_xml(trigger)) { for (xmlNode *input = pcmk__xe_first_child(trigger); input != NULL; input = pcmk__xe_next(input)) { pcmk__graph_action_t *new_input = unpack_action(new_synapse, input); if (new_input == NULL) { continue; } crm_trace("Adding input %d to synapse %d", new_input->id, new_synapse->id); new_synapse->inputs = g_list_append(new_synapse->inputs, new_input); } } } return new_synapse; } /*! * \internal * \brief Unpack transition graph XML * * \param[in] xml_graph Transition graph XML to unpack * \param[in] reference Where the XML came from (for logging) * * \return Newly allocated transition graph on success, NULL otherwise * \note The caller is responsible for freeing the return value using * pcmk__free_graph(). * \note The XML is expected to be structured like: ... ... 
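 * An illustrative sketch only (attribute values are hypothetical; element and
 * attribute names follow the unpacking code below):
 *
 *     <transition_graph transition_id="4" cluster-delay="60s" batch-limit="0">
 *       <synapse id="0" priority="0">
 *         <action_set>
 *           <rsc_op id="2" operation="start" .../>
 *         </action_set>
 *         <inputs>
 *           <trigger>
 *             <rsc_op id="1" .../>
 *           </trigger>
 *         </inputs>
 *       </synapse>
 *     </transition_graph>
 *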
*/ pcmk__graph_t * pcmk__unpack_graph(const xmlNode *xml_graph, const char *reference) { pcmk__graph_t *new_graph = NULL; new_graph = calloc(1, sizeof(pcmk__graph_t)); if (new_graph == NULL) { return NULL; } new_graph->source = strdup(pcmk__s(reference, "unknown")); if (new_graph->source == NULL) { free(new_graph); return NULL; } new_graph->id = -1; new_graph->abort_priority = 0; new_graph->network_delay = 0; new_graph->stonith_timeout = 0; new_graph->completion_action = pcmk__graph_done; // Parse top-level attributes from PCMK__XE_TRANSITION_GRAPH if (xml_graph != NULL) { const char *buf = crm_element_value(xml_graph, "transition_id"); CRM_CHECK(buf != NULL, free(new_graph); return NULL); pcmk__scan_min_int(buf, &(new_graph->id), -1); buf = crm_element_value(xml_graph, PCMK_OPT_CLUSTER_DELAY); CRM_CHECK(buf != NULL, free(new_graph); return NULL); pcmk_parse_interval_spec(buf, &(new_graph->network_delay)); buf = crm_element_value(xml_graph, PCMK_OPT_STONITH_TIMEOUT); if (buf == NULL) { new_graph->stonith_timeout = new_graph->network_delay; } else { pcmk_parse_interval_spec(buf, &(new_graph->stonith_timeout)); } // Use 0 (dynamic limit) as default/invalid, -1 (no limit) as minimum buf = crm_element_value(xml_graph, PCMK_OPT_BATCH_LIMIT); if ((buf == NULL) || (pcmk__scan_min_int(buf, &(new_graph->batch_limit), -1) != pcmk_rc_ok)) { new_graph->batch_limit = 0; } buf = crm_element_value(xml_graph, PCMK_OPT_MIGRATION_LIMIT); pcmk__scan_min_int(buf, &(new_graph->migration_limit), -1); pcmk__str_update(&(new_graph->failed_stop_offset), crm_element_value(xml_graph, "failed-stop-offset")); pcmk__str_update(&(new_graph->failed_start_offset), crm_element_value(xml_graph, "failed-start-offset")); if (crm_element_value_epoch(xml_graph, "recheck-by", &(new_graph->recheck_by)) != pcmk_ok) { new_graph->recheck_by = 0; } } // Unpack each child element for (const xmlNode *synapse_xml = first_named_child(xml_graph, "synapse"); synapse_xml != NULL; synapse_xml = crm_next_same_xml(synapse_xml)) { pcmk__graph_synapse_t *new_synapse = unpack_synapse(new_graph, synapse_xml); if (new_synapse != NULL) { new_graph->synapses = g_list_append(new_graph->synapses, new_synapse); } } crm_debug("Unpacked transition %d from %s: %d actions in %d synapses", new_graph->id, new_graph->source, new_graph->num_actions, new_graph->num_synapses); return new_graph; } /* * Functions for freeing transition graph objects */ /*! * \internal * \brief Free a transition graph action object * * \param[in,out] user_data Action to free */ static void free_graph_action(gpointer user_data) { pcmk__graph_action_t *action = user_data; if (action->timer != 0) { crm_warn("Cancelling timer for graph action %d", action->id); g_source_remove(action->timer); } if (action->params != NULL) { g_hash_table_destroy(action->params); } free_xml(action->xml); free(action); } /*! * \internal * \brief Free a transition graph synapse object * * \param[in,out] user_data Synapse to free */ static void free_graph_synapse(gpointer user_data) { pcmk__graph_synapse_t *synapse = user_data; g_list_free_full(synapse->actions, free_graph_action); g_list_free_full(synapse->inputs, free_graph_action); free(synapse); } /*! 
* \internal * \brief Free a transition graph object * * \param[in,out] graph Transition graph to free */ void pcmk__free_graph(pcmk__graph_t *graph) { if (graph != NULL) { g_list_free_full(graph->synapses, free_graph_synapse); free(graph->source); free(graph->failed_stop_offset); free(graph->failed_start_offset); free(graph); } } /* * Other transition graph utilities */ /*! * \internal * \brief Synthesize an executor event from a graph action * * \param[in] resource If not NULL, use greater call ID than in this XML * \param[in] action Graph action * \param[in] status What to use as event execution status * \param[in] rc What to use as event exit status * \param[in] exit_reason What to use as event exit reason * * \return Newly allocated executor event on success, or NULL otherwise */ lrmd_event_data_t * pcmk__event_from_graph_action(const xmlNode *resource, const pcmk__graph_action_t *action, int status, int rc, const char *exit_reason) { lrmd_event_data_t *op = NULL; GHashTableIter iter; const char *name = NULL; const char *value = NULL; xmlNode *action_resource = NULL; CRM_CHECK(action != NULL, return NULL); CRM_CHECK(action->type == pcmk__rsc_graph_action, return NULL); action_resource = first_named_child(action->xml, PCMK_XE_PRIMITIVE); CRM_CHECK(action_resource != NULL, crm_log_xml_warn(action->xml, "invalid"); return NULL); op = lrmd_new_event(pcmk__xe_id(action_resource), crm_element_value(action->xml, PCMK_XA_OPERATION), action->interval_ms); lrmd__set_result(op, rc, status, exit_reason); op->t_run = time(NULL); op->t_rcchange = op->t_run; op->params = pcmk__strkey_table(free, free); g_hash_table_iter_init(&iter, action->params); while (g_hash_table_iter_next(&iter, (void **)&name, (void **)&value)) { pcmk__insert_dup(op->params, name, value); } for (xmlNode *xop = pcmk__xe_first_child(resource); xop != NULL; xop = pcmk__xe_next(xop)) { int tmp = 0; crm_element_value_int(xop, PCMK__XA_CALL_ID, &tmp); crm_debug("Got call_id=%d for %s", tmp, pcmk__xe_id(resource)); if (tmp > op->call_id) { op->call_id = tmp; } } op->call_id++; return op; } diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index 186729ffbe..7410abb389 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -1,5158 +1,5159 @@ /* * Copyright 2004-2024 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include CRM_TRACE_INIT_DATA(pe_status); // A (parsed) resource action history entry struct action_history { pcmk_resource_t *rsc; // Resource that history is for pcmk_node_t *node; // Node that history is for xmlNode *xml; // History entry XML // Parsed from entry XML const char *id; // XML ID of history entry const char *key; // Operation key of action const char *task; // Action name const char *exit_reason; // Exit reason given for result guint interval_ms; // Action interval int call_id; // Call ID of action int expected_exit_status; // Expected exit status of action int exit_status; // Actual exit status of action int execution_status; // Execution status of action }; /* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than * use pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the * flag is stringified more readably in log messages. 
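 * For example, set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED,
 * pcmk_sched_fencing_enabled) looks the option up in the scheduler's
 * config_hash and then sets or clears pcmk_sched_fencing_enabled in
 * scheduler->flags, logging the flag by its name rather than as a raw bit
 * value.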
*/ #define set_config_flag(scheduler, option, flag) do { \ GHashTable *config_hash = (scheduler)->config_hash; \ const char *scf_value = pcmk__cluster_option(config_hash, (option)); \ \ if (scf_value != NULL) { \ if (crm_is_true(scf_value)) { \ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } else { \ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, "Scheduler", \ crm_system_name, (scheduler)->flags, \ (flag), #flag); \ } \ } \ } while(0) static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *failed); static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node); static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler); static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler); static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler); static gboolean is_dangling_guest_node(pcmk_node_t *node) { /* we are looking for a remote-node that was supposed to be mapped to a * container resource, but all traces of that container have disappeared * from both the config and the status section. */ if (pcmk__is_pacemaker_remote_node(node) && (node->details->remote_rsc != NULL) && (node->details->remote_rsc->container == NULL) && pcmk_is_set(node->details->remote_rsc->flags, pcmk_rsc_removed_filler)) { return TRUE; } return FALSE; } /*! * \brief Schedule a fence action for a node * * \param[in,out] scheduler Scheduler data * \param[in,out] node Node to fence * \param[in] reason Text description of why fencing is needed * \param[in] priority_delay Whether to consider * \c PCMK_OPT_PRIORITY_FENCING_DELAY */ void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, const char *reason, bool priority_delay) { CRM_CHECK(node, return); /* A guest node is fenced by marking its container as failed */ if (pcmk__is_guest_or_bundle_node(node)) { pcmk_resource_t *rsc = node->details->remote_rsc->container; if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { crm_notice("Not fencing guest node %s " "(otherwise would because %s): " "its guest resource %s is unmanaged", pcmk__node_name(node), reason, rsc->id); } else { pcmk__sched_warn("Guest node %s will be fenced " "(by recovering its guest resource %s): %s", pcmk__node_name(node), rsc->id, reason); /* We don't mark the node as unclean because that would prevent the * node from running resources. We want to allow it to run resources * in this transition if the recovery succeeds. 
*/ node->details->remote_requires_reset = TRUE; pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); } } } else if (is_dangling_guest_node(node)) { crm_info("Cleaning up dangling connection for guest node %s: " "fencing was already done because %s, " "and guest resource no longer exists", pcmk__node_name(node), reason); pcmk__set_rsc_flags(node->details->remote_rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); } else if (pcmk__is_remote_node(node)) { pcmk_resource_t *rsc = node->details->remote_rsc; if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { crm_notice("Not fencing remote node %s " "(otherwise would because %s): connection is unmanaged", pcmk__node_name(node), reason); } else if(node->details->remote_requires_reset == FALSE) { node->details->remote_requires_reset = TRUE; pcmk__sched_warn("Remote node %s %s: %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "will be fenced" : "is unclean", reason); } node->details->unclean = TRUE; // No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler); } else if (node->details->unclean) { crm_trace("Cluster node %s %s because %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean", reason); } else { pcmk__sched_warn("Cluster node %s %s: %s", pcmk__node_name(node), pe_can_fence(scheduler, node)? "will be fenced" : "is unclean", reason); node->details->unclean = TRUE; pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler); } } // @TODO xpaths can't handle templates, rules, or id-refs // nvpair with provides or requires set to unfencing #define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR \ "[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'" \ "or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') " \ "and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']" // unfencing in rsc_defaults or any resource #define XPATH_ENABLE_UNFENCING \ "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \ "//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR \ "|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \ "/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR static void set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler) { xmlXPathObjectPtr result = NULL; if (!pcmk_is_set(scheduler->flags, flag)) { result = xpath_search(scheduler->input, xpath); if (result && (numXpathResults(result) > 0)) { pcmk__set_scheduler_flags(scheduler, flag); } freeXpathObject(result); } } gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler) { const char *value = NULL; guint interval_ms = 0U; GHashTable *config_hash = pcmk__strkey_table(free, free); pe_rule_eval_data_t rule_data = { .node_hash = NULL, .now = scheduler->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; scheduler->config_hash = config_hash; pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET, &rule_data, config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, FALSE, scheduler); pcmk__validate_cluster_options(config_hash); set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES, pcmk_sched_probe_resources); if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) { crm_info("Startup probes: disabled (dangerous)"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG); if (value && crm_is_true(value)) { crm_info("Watchdog-based self-fencing will be performed via SBD if " "fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " is nonzero"); 
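    /* Illustration of the interval handling used for the options below
     * (value hypothetical): pcmk_parse_interval_spec("90s", &interval_ms)
     * leaves interval_ms == 90000, which unpack_config() then clamps at
     * INT_MAX and stores in scheduler->stonith_timeout as milliseconds.
     */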
pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_fencing); } /* Set certain flags via xpath here, so they can be used before the relevant * configuration sections are unpacked. */ set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING, scheduler); value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT); pcmk_parse_interval_spec(value, &interval_ms); if (interval_ms >= INT_MAX) { scheduler->stonith_timeout = INT_MAX; } else { scheduler->stonith_timeout = (int) interval_ms; } crm_debug("STONITH timeout: %d", scheduler->stonith_timeout); set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED, pcmk_sched_fencing_enabled); if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { crm_debug("STONITH of failed nodes is enabled"); } else { crm_debug("STONITH of failed nodes is disabled"); } scheduler->stonith_action = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_ACTION); if (!strcmp(scheduler->stonith_action, PCMK__ACTION_POWEROFF)) { pcmk__warn_once(pcmk__wo_poweroff, "Support for " PCMK_OPT_STONITH_ACTION " of " "'" PCMK__ACTION_POWEROFF "' is deprecated and will be " "removed in a future release " "(use '" PCMK_ACTION_OFF "' instead)"); scheduler->stonith_action = PCMK_ACTION_OFF; } crm_trace("STONITH will %s nodes", scheduler->stonith_action); set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING, pcmk_sched_concurrent_fencing); if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) { crm_debug("Concurrent fencing is enabled"); } else { crm_debug("Concurrent fencing is disabled"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY); if (value) { pcmk_parse_interval_spec(value, &interval_ms); scheduler->priority_fencing_delay = (int) (interval_ms / 1000); crm_trace("Priority fencing delay is %ds", scheduler->priority_fencing_delay); } set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES, pcmk_sched_stop_all); crm_debug("Stop all active resources: %s", pcmk__flag_text(scheduler->flags, pcmk_sched_stop_all)); set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER, pcmk_sched_symmetric_cluster); if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) { crm_debug("Cluster is symmetric" " - resources can run anywhere by default"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY); if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_ignore; } else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_freeze; } else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) { scheduler->no_quorum_policy = pcmk_no_quorum_demote; } else if (pcmk__str_eq(value, PCMK_VALUE_FENCE_LEGACY, pcmk__str_casei)) { if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { int do_panic = 0; crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC, &do_panic); if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) { scheduler->no_quorum_policy = pcmk_no_quorum_fence; } else { crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY " to 'stop': cluster has never had quorum"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY " to 'stop' because fencing is disabled"); scheduler->no_quorum_policy = pcmk_no_quorum_stop; } } else { scheduler->no_quorum_policy = pcmk_no_quorum_stop; } switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: crm_debug("On loss of quorum: Freeze resources"); break; case 
pcmk_no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; case pcmk_no_quorum_demote: crm_debug("On loss of quorum: " "Demote promotable resources and stop other resources"); break; case pcmk_no_quorum_fence: crm_notice("On loss of quorum: Fence all remaining nodes"); break; case pcmk_no_quorum_ignore: crm_notice("On loss of quorum: Ignore"); break; } set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES, pcmk_sched_stop_removed_resources); if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) { crm_trace("Orphan resources are stopped"); } else { crm_trace("Orphan resources are ignored"); } set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS, pcmk_sched_cancel_removed_actions); if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) { crm_trace("Orphan resource actions are stopped"); } else { crm_trace("Orphan resource actions are ignored"); } value = pcmk__cluster_option(config_hash, PCMK__OPT_REMOVE_AFTER_STOP); if (value != NULL) { if (crm_is_true(value)) { pcmk__set_scheduler_flags(scheduler, pcmk_sched_remove_after_stop); #ifndef PCMK__COMPAT_2_0 pcmk__warn_once(pcmk__wo_remove_after, "Support for the " PCMK__OPT_REMOVE_AFTER_STOP " cluster property is deprecated and will be " "removed in a future release"); #endif } else { pcmk__clear_scheduler_flags(scheduler, pcmk_sched_remove_after_stop); } } set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE, pcmk_sched_in_maintenance); crm_trace("Maintenance mode: %s", pcmk__flag_text(scheduler->flags, pcmk_sched_in_maintenance)); set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL, pcmk_sched_start_failure_fatal); if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) { crm_trace("Start failures are always fatal"); } else { crm_trace("Start failures are handled by failcount"); } if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING, pcmk_sched_startup_fencing); } if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) { crm_trace("Unseen nodes will be fenced"); } else { pcmk__warn_once(pcmk__wo_blind, "Blind faith: not fencing unseen nodes"); } pe__unpack_node_health_scores(scheduler); scheduler->placement_strategy = pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY); crm_trace("Placement strategy: %s", scheduler->placement_strategy); set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK, pcmk_sched_shutdown_lock); if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) { value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT); pcmk_parse_interval_spec(value, &(scheduler->shutdown_lock)); scheduler->shutdown_lock /= 1000; crm_trace("Resources will be locked to nodes that were cleanly " "shut down (locks expire after %s)", pcmk__readable_interval(scheduler->shutdown_lock)); } else { crm_trace("Resources will not be locked to nodes that were cleanly " "shut down"); } value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT); pcmk_parse_interval_spec(value, &(scheduler->node_pending_timeout)); scheduler->node_pending_timeout /= 1000; if (scheduler->node_pending_timeout == 0) { crm_trace("Do not fence pending nodes"); } else { crm_trace("Fence pending nodes after %s", pcmk__readable_interval(scheduler->node_pending_timeout * 1000)); } return TRUE; } pcmk_node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pcmk_scheduler_t *scheduler) { pcmk_node_t *new_node = NULL; if (pe_find_node(scheduler->nodes, uname) != 
NULL) { pcmk__config_warn("More than one node entry has name '%s'", uname); } new_node = calloc(1, sizeof(pcmk_node_t)); if (new_node == NULL) { pcmk__sched_err("Could not allocate memory for node %s", uname); return NULL; } new_node->weight = char2score(score); new_node->details = calloc(1, sizeof(struct pe_node_shared_s)); if (new_node->details == NULL) { free(new_node); pcmk__sched_err("Could not allocate memory for node %s", uname); return NULL; } crm_trace("Creating node for entry %s/%s", uname, id); new_node->details->id = id; new_node->details->uname = uname; new_node->details->online = FALSE; new_node->details->shutdown = FALSE; new_node->details->rsc_discovery_enabled = TRUE; new_node->details->running_rsc = NULL; new_node->details->data_set = scheduler; if (pcmk__str_eq(type, PCMK_VALUE_MEMBER, pcmk__str_null_matches|pcmk__str_casei)) { new_node->details->type = pcmk_node_variant_cluster; } else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) { new_node->details->type = pcmk_node_variant_remote; pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_remote_nodes); } else { /* @COMPAT 'ping' is the default for backward compatibility, but it * should be changed to 'member' at a compatibility break */ if (!pcmk__str_eq(type, PCMK__VALUE_PING, pcmk__str_casei)) { pcmk__config_warn("Node %s has unrecognized type '%s', " "assuming '" PCMK__VALUE_PING "'", pcmk__s(uname, "without name"), type); } pcmk__warn_once(pcmk__wo_ping_node, "Support for nodes of type '" PCMK__VALUE_PING "' " "(such as %s) is deprecated and will be removed in a " "future release", pcmk__s(uname, "unnamed node")); new_node->details->type = node_ping; } new_node->details->attrs = pcmk__strkey_table(free, free); if (pcmk__is_pacemaker_remote_node(new_node)) { pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "remote"); } else { pcmk__insert_dup(new_node->details->attrs, CRM_ATTR_KIND, "cluster"); } new_node->details->utilization = pcmk__strkey_table(free, free); new_node->details->digest_cache = pcmk__strkey_table(free, pe__free_digests); scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node, pe__cmp_node_name); return new_node; } static const char * expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data) { xmlNode *attr_set = NULL; xmlNode *attr = NULL; const char *container_id = pcmk__xe_id(xml_obj); const char *remote_name = NULL; const char *remote_server = NULL; const char *remote_port = NULL; const char *connect_timeout = "60s"; const char *remote_allow_migrate=NULL; const char *is_managed = NULL; for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL; attr_set = pcmk__xe_next(attr_set)) { if (!pcmk__xe_is(attr_set, PCMK_XE_META_ATTRIBUTES)) { continue; } for (attr = pcmk__xe_first_child(attr_set); attr != NULL; attr = pcmk__xe_next(attr)) { const char *value = crm_element_value(attr, PCMK_XA_VALUE); const char *name = crm_element_value(attr, PCMK_XA_NAME); if (name == NULL) { // Sanity continue; } if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) { remote_name = value; } else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) { remote_server = value; } else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) { remote_port = value; } else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) { connect_timeout = value; } else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) { remote_allow_migrate = value; } else if (strcmp(name, PCMK_META_IS_MANAGED) == 0) { is_managed = value; } } } if (remote_name == NULL) { return NULL; } if 
(pe_find_resource(data->resources, remote_name) != NULL) { return NULL; } pe_create_remote_xml(parent, remote_name, container_id, remote_allow_migrate, is_managed, connect_timeout, remote_server, remote_port); return remote_name; } static void handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node) { if ((new_node->details->type == pcmk_node_variant_remote) && (new_node->details->remote_rsc == NULL)) { /* Ignore fencing for remote nodes that don't have a connection resource * associated with them. This happens when remote node entries get left * in the nodes section after the connection resource is removed. */ return; } if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) { // All nodes are unclean until we've seen their status entry new_node->details->unclean = TRUE; } else { // Blind faith ... new_node->details->unclean = FALSE; } /* We need to be able to determine if a node's status section * exists or not separate from whether the node is unclean. */ new_node->details->unseen = TRUE; } gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; pcmk_node_t *new_node = NULL; const char *id = NULL; const char *uname = NULL; const char *type = NULL; const char *score = NULL; for (xml_obj = pcmk__xe_first_child(xml_nodes); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { if (pcmk__xe_is(xml_obj, PCMK_XE_NODE)) { new_node = NULL; id = crm_element_value(xml_obj, PCMK_XA_ID); uname = crm_element_value(xml_obj, PCMK_XA_UNAME); type = crm_element_value(xml_obj, PCMK_XA_TYPE); score = crm_element_value(xml_obj, PCMK_XA_SCORE); crm_trace("Processing node %s/%s", uname, id); if (id == NULL) { pcmk__config_err("Ignoring <" PCMK_XE_NODE "> entry in configuration without id"); continue; } new_node = pe_create_node(id, uname, type, score, scheduler); if (new_node == NULL) { return FALSE; } handle_startup_fencing(scheduler, new_node); add_node_attrs(xml_obj, new_node, FALSE, scheduler); crm_trace("Done with node %s", crm_element_value(xml_obj, PCMK_XA_UNAME)); } } if (scheduler->localhost && (pe_find_node(scheduler->nodes, scheduler->localhost) == NULL)) { crm_info("Creating a fake local node"); pe_create_node(scheduler->localhost, scheduler->localhost, NULL, 0, scheduler); } return TRUE; } static void setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler) { const char *container_id = NULL; if (rsc->children) { g_list_foreach(rsc->children, (GFunc) setup_container, scheduler); return; } container_id = g_hash_table_lookup(rsc->meta, PCMK__META_CONTAINER); if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) { pcmk_resource_t *container = pe_find_resource(scheduler->resources, container_id); if (container) { rsc->container = container; pcmk__set_rsc_flags(container, pcmk_rsc_has_filler); container->fillers = g_list_append(container->fillers, rsc); pcmk__rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id); } else { pcmk__config_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id); } } } gboolean unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; /* Create remote nodes and guest nodes from the resource configuration * before unpacking resources. */ for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { const char *new_node_id = NULL; /* Check for remote nodes, which are defined by ocf:pacemaker:remote * primitives. 
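* For illustration only (the resource ID and address are hypothetical, not
* taken from any real configuration), such a primitive might look like:
*
*   <primitive id="remote1" class="ocf" provider="pacemaker" type="remote">
*     <instance_attributes id="remote1-params">
*       <nvpair id="remote1-server" name="server" value="192.0.2.10"/>
*     </instance_attributes>
*   </primitive>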
*/ if (xml_contains_remote_node(xml_obj)) { new_node_id = pcmk__xe_id(xml_obj); /* The "pe_find_node" check is here to make sure we don't iterate over * an expanded node that has already been added to the node list. */ if (new_node_id && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) { crm_trace("Found remote node %s defined by resource %s", new_node_id, pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, NULL, scheduler); } continue; } /* Check for guest nodes, which are defined by special meta-attributes * of a primitive of any type (for example, VirtualDomain or Xen). */ if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) { /* This will add an ocf:pacemaker:remote primitive to the * configuration for the guest node's connection, to be unpacked * later. */ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, scheduler); if (new_node_id && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s", new_node_id, pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, NULL, scheduler); } continue; } /* Check for guest nodes inside a group. Clones are currently not * supported as guest nodes. */ if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) { xmlNode *xml_obj2 = NULL; for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2)) { new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, scheduler); if (new_node_id && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) { crm_trace("Found guest node %s in resource %s inside group %s", new_node_id, pcmk__xe_id(xml_obj2), pcmk__xe_id(xml_obj)); pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE, NULL, scheduler); } } } } return TRUE; } /* Call this after all the nodes and resources have been * unpacked, but before the status section is read. * * A remote node's online status is reflected by the state * of the remote node's connection resource. We need to link * the remote node to this connection resource so we can have * easy access to the connection resource during the scheduler calculations. */ static void link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc) { pcmk_node_t *remote_node = NULL; if (new_rsc->is_remote_node == FALSE) { return; } if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) { /* remote_nodes and remote_resources are not linked in quick location calculations */ return; } remote_node = pe_find_node(scheduler->nodes, new_rsc->id); CRM_CHECK(remote_node != NULL, return); pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s", new_rsc->id, pcmk__node_name(remote_node)); remote_node->details->remote_rsc = new_rsc; if (new_rsc->container == NULL) { /* Handle start-up fencing for remote nodes (as opposed to guest nodes) * the same as is done for cluster nodes. */ handle_startup_fencing(scheduler, remote_node); } else { /* pe_create_node() marks the new node as "remote" or "cluster"; now * that we know the node is a guest node, update it correctly. */ pcmk__insert_dup(remote_node->details->attrs, CRM_ATTR_KIND, "container"); } } static void destroy_tag(gpointer data) { pcmk_tag_t *tag = data; if (tag) { free(tag->id); g_list_free_full(tag->refs, free); free(tag); } } /*! 
* \internal * \brief Parse configuration XML for resource information * * \param[in] xml_resources Top of resource configuration XML * \param[in,out] scheduler Scheduler data * * \return TRUE * * \note unpack_remote_nodes() MUST be called before this, so that the nodes can * be used when pe__unpack_resource() calls resource_location() */ gboolean unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; GList *gIter = NULL; scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag); for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { pcmk_resource_t *new_rsc = NULL; const char *id = pcmk__xe_id(xml_obj); if (pcmk__str_empty(id)) { pcmk__config_err("Ignoring <%s> resource without ID", xml_obj->name); continue; } if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) { if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id, NULL, NULL) == FALSE) { /* Record the template's ID for the knowledge of its existence anyway. */ pcmk__insert_dup(scheduler->template_rsc_sets, id, NULL); } continue; } crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id); if (pe__unpack_resource(xml_obj, &new_rsc, NULL, scheduler) == pcmk_rc_ok) { scheduler->resources = g_list_append(scheduler->resources, new_rsc); pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id); } else { pcmk__config_err("Ignoring <%s> resource '%s' " "because configuration is invalid", xml_obj->name, id); } } for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; setup_container(rsc, scheduler); link_rsc2remotenode(scheduler, rsc); } scheduler->resources = g_list_sort(scheduler->resources, pe__cmp_rsc_priority); if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) { /* Ignore */ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled) && !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) { pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined"); pcmk__config_err("Either configure some or disable STONITH with the " PCMK_OPT_STONITH_ENABLED " option"); pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity"); } return TRUE; } gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler) { xmlNode *xml_tag = NULL; scheduler->tags = pcmk__strkey_table(free, destroy_tag); for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag)) { xmlNode *xml_obj_ref = NULL; const char *tag_id = pcmk__xe_id(xml_tag); if (!pcmk__xe_is(xml_tag, PCMK_XE_TAG)) { continue; } if (tag_id == NULL) { pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID, (const char *) xml_tag->name); continue; } for (xml_obj_ref = pcmk__xe_first_child(xml_tag); xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref)) { const char *obj_ref = pcmk__xe_id(xml_obj_ref); if (!pcmk__xe_is(xml_obj_ref, PCMK_XE_OBJ_REF)) { continue; } if (obj_ref == NULL) { pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID, xml_obj_ref->name, tag_id); continue; } if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) { return FALSE; } } } return TRUE; } /* The ticket state section: * "/cib/status/tickets/ticket_state" */ static gboolean unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler) { const char *ticket_id = NULL; const char *granted = NULL; const char *last_granted = NULL; const char *standby = NULL; xmlAttrPtr xIter = NULL; 
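/* Hypothetical example of the element unpacked here (ID and values are
 * illustrative only):
 *
 *   <ticket_state id="ticketA" granted="true" last-granted="1720000000"
 *                 standby="false"/>
 *
 * Every attribute except id is copied into ticket->state below; granted,
 * last-granted, and standby then get special handling.
 */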
pcmk_ticket_t *ticket = NULL; ticket_id = pcmk__xe_id(xml_ticket); if (pcmk__str_empty(ticket_id)) { return FALSE; } crm_trace("Processing ticket state for %s", ticket_id); ticket = g_hash_table_lookup(scheduler->tickets, ticket_id); if (ticket == NULL) { ticket = ticket_new(ticket_id, scheduler); if (ticket == NULL) { return FALSE; } } for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) { const char *prop_name = (const char *)xIter->name; const char *prop_value = pcmk__xml_attr_value(xIter); if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) { continue; } pcmk__insert_dup(ticket->state, prop_name, prop_value); } granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED); if (granted && crm_is_true(granted)) { ticket->granted = TRUE; crm_info("We have ticket '%s'", ticket->id); } else { ticket->granted = FALSE; crm_info("We do not have ticket '%s'", ticket->id); } last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED); if (last_granted) { long long last_granted_ll; pcmk__scan_ll(last_granted, &last_granted_ll, 0LL); ticket->last_granted = (time_t) last_granted_ll; } standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY); if (standby && crm_is_true(standby)) { ticket->standby = TRUE; if (ticket->granted) { crm_info("Granted ticket '%s' is in standby-mode", ticket->id); } } else { ticket->standby = FALSE; } crm_trace("Done with ticket state for %s", ticket_id); return TRUE; } static gboolean unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler) { xmlNode *xml_obj = NULL; for (xml_obj = pcmk__xe_first_child(xml_tickets); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) { if (!pcmk__xe_is(xml_obj, PCMK__XE_TICKET_STATE)) { continue; } unpack_ticket_state(xml_obj, scheduler); } return TRUE; } static void unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *discovery = NULL; const xmlNode *attrs = NULL; pcmk_resource_t *rsc = NULL; if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) { return; } if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) { return; } crm_trace("Processing Pacemaker Remote node %s", pcmk__node_name(this_node)); pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE), &(this_node->details->remote_maintenance), 0); rsc = this_node->details->remote_rsc; if (this_node->details->remote_requires_reset == FALSE) { this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; } attrs = find_xml_node(state, PCMK__XE_TRANSIENT_ATTRIBUTES, FALSE); add_node_attrs(attrs, this_node, TRUE, scheduler); if (pe__shutdown_requested(this_node)) { crm_info("%s is shutting down", pcmk__node_name(this_node)); this_node->details->shutdown = TRUE; } if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL, pcmk__rsc_node_current))) { crm_info("%s is in standby mode", pcmk__node_name(this_node)); this_node->details->standby = TRUE; } if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL, pcmk__rsc_node_current)) || ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) { crm_info("%s is in maintenance mode", pcmk__node_name(this_node)); this_node->details->maintenance = TRUE; } discovery = pcmk__node_attr(this_node, PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED, NULL, pcmk__rsc_node_current); if ((discovery != NULL) && !crm_is_true(discovery)) { pcmk__warn_once(pcmk__wo_rdisc_enabled, "Support for the " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " node attribute is deprecated and will be 
removed" " (and behave as 'true') in a future release."); if (pcmk__is_remote_node(this_node) && !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { pcmk__config_warn("Ignoring " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " attribute on Pacemaker Remote node %s" " because fencing is disabled", pcmk__node_name(this_node)); } else { /* This is either a remote node with fencing enabled, or a guest * node. We don't care whether fencing is enabled when fencing guest * nodes, because they are "fenced" by recovering their containing * resource. */ crm_info("%s has resource discovery disabled", pcmk__node_name(this_node)); this_node->details->rsc_discovery_enabled = FALSE; } } } /*! * \internal * \brief Unpack a cluster node's transient attributes * * \param[in] state CIB node state XML * \param[in,out] node Cluster node whose attributes are being unpacked * \param[in,out] scheduler Scheduler data */ static void unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { const char *discovery = NULL; const xmlNode *attrs = find_xml_node(state, PCMK__XE_TRANSIENT_ATTRIBUTES, FALSE); add_node_attrs(attrs, node, TRUE, scheduler); if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL, pcmk__rsc_node_current))) { crm_info("%s is in standby mode", pcmk__node_name(node)); node->details->standby = TRUE; } if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL, pcmk__rsc_node_current))) { crm_info("%s is in maintenance mode", pcmk__node_name(node)); node->details->maintenance = TRUE; } discovery = pcmk__node_attr(node, PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED, NULL, pcmk__rsc_node_current); if ((discovery != NULL) && !crm_is_true(discovery)) { pcmk__config_warn("Ignoring " PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED " attribute for %s because disabling resource" " discovery is not allowed for cluster nodes", pcmk__node_name(node)); } } /*! * \internal * \brief Unpack a node state entry (first pass) * * Unpack one node state entry from status. This unpacks information from the * \C PCMK__XE_NODE_STATE element itself and node attributes inside it, but not * the resource history inside it. Multiple passes through the status are needed * to fully unpack everything. * * \param[in] state CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler) { const char *id = NULL; const char *uname = NULL; pcmk_node_t *this_node = NULL; id = crm_element_value(state, PCMK_XA_ID); if (id == NULL) { pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without " PCMK_XA_ID); crm_log_xml_info(state, "missing-id"); return; } uname = crm_element_value(state, PCMK_XA_UNAME); if (uname == NULL) { /* If a joining peer makes the cluster acquire the quorum from corosync * meanwhile it has not joined CPG membership of pacemaker-controld yet, * it's possible that the created PCMK__XE_NODE_STATE entry doesn't have * a PCMK_XA_UNAME yet. We should recognize the node as `pending` and * wait for it to join CPG. 
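* A purely illustrative example of such an entry (attribute values are
* hypothetical):
*
*   <node_state id="3" in_ccm="1720000000" crmd="0"/>
*
* Note that no uname has been recorded yet.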
*/ crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" " "without " PCMK_XA_UNAME, id); } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { - pcmk__config_warn("Ignoring recorded node state for id=\"%s\" (%s) " + pcmk__config_warn("Ignoring recorded node state for " + PCMK_XA_ID "=\"%s\" (%s) " "because it is no longer in the configuration", id, pcmk__s(uname, "uname unknown")); return; } if (pcmk__is_pacemaker_remote_node(this_node)) { /* We can't determine the online status of Pacemaker Remote nodes until * after all resource history has been unpacked. In this first pass, we * do need to mark whether the node has been fenced, as this plays a * role during unpacking cluster node resource state. */ pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED), &(this_node->details->remote_was_fenced), 0); return; } unpack_transient_attributes(state, this_node, scheduler); /* Provisionally mark this cluster node as clean. We have at least seen it * in the current cluster's lifetime. */ this_node->details->unclean = FALSE; this_node->details->unseen = FALSE; crm_trace("Determining online status of cluster node %s (id %s)", pcmk__node_name(this_node), id); determine_online_status(state, this_node, scheduler); if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate) && this_node->details->online && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) { /* Everything else should flow from this automatically * (at least until the scheduler becomes able to migrate off * healthy resources) */ pe_fence_node(scheduler, this_node, "cluster does not have quorum", FALSE); } } /*! * \internal * \brief Unpack nodes' resource history as much as possible * * Unpack as many nodes' resource history as possible in one pass through the * status. We need to process Pacemaker Remote nodes' connections/containers * before unpacking their history; the connection/container history will be * in another node's history, so it might take multiple passes to unpack * everything. 
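* Callers are expected to invoke this repeatedly for as long as it returns
* EAGAIN (see the loop in unpack_status() below).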
* * \param[in] status CIB XML status section * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen * \param[in,out] scheduler Scheduler data * * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done, * or EAGAIN if more unpacking remains to be done) */ static int unpack_node_history(const xmlNode *status, bool fence, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; // Loop through all PCMK__XE_NODE_STATE entries in CIB status for (const xmlNode *state = first_named_child(status, PCMK__XE_NODE_STATE); state != NULL; state = crm_next_same_xml(state)) { const char *id = pcmk__xe_id(state); const char *uname = crm_element_value(state, PCMK_XA_UNAME); pcmk_node_t *this_node = NULL; if ((id == NULL) || (uname == NULL)) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history from malformed " PCMK__XE_NODE_STATE " without id and/or uname"); continue; } this_node = pe_find_node_any(scheduler->nodes, id, uname); if (this_node == NULL) { // Warning already logged in first pass through status section crm_trace("Not unpacking resource history for node %s because " "no longer in configuration", id); continue; } if (this_node->details->unpacked) { crm_trace("Not unpacking resource history for node %s because " "already unpacked", id); continue; } if (fence) { // We're processing all remaining nodes } else if (pcmk__is_guest_or_bundle_node(this_node)) { /* We can unpack a guest node's history only after we've unpacked * other resource history to the point that we know that the node's * connection and containing resource are both up. */ pcmk_resource_t *rsc = this_node->details->remote_rsc; if ((rsc == NULL) || (rsc->role != pcmk_role_started) || (rsc->container->role != pcmk_role_started)) { crm_trace("Not unpacking resource history for guest node %s " "because container and connection are not known to " "be up", id); continue; } } else if (pcmk__is_remote_node(this_node)) { /* We can unpack a remote node's history only after we've unpacked * other resource history to the point that we know that the node's * connection is up, with the exception of when shutdown locks are * in use. */ pcmk_resource_t *rsc = this_node->details->remote_rsc; if ((rsc == NULL) || (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock) && (rsc->role != pcmk_role_started))) { crm_trace("Not unpacking resource history for remote node %s " "because connection is not known to be up", id); continue; } /* If fencing and shutdown locks are disabled and we're not processing * unseen nodes, then we don't want to unpack offline nodes until online * nodes have been unpacked. This allows us to number active clone * instances first. */ } else if (!pcmk_any_flags_set(scheduler->flags, pcmk_sched_fencing_enabled |pcmk_sched_shutdown_lock) && !this_node->details->online) { crm_trace("Not unpacking resource history for offline " "cluster node %s", id); continue; } if (pcmk__is_pacemaker_remote_node(this_node)) { determine_remote_online_status(scheduler, this_node); unpack_handle_remote_attrs(this_node, state, scheduler); } crm_trace("Unpacking resource history for %snode %s", (fence? "unseen " : ""), id); this_node->details->unpacked = TRUE; unpack_node_lrm(this_node, state, scheduler); rc = EAGAIN; // Other node histories might depend on this one } return rc; } /* remove nodes that are down, stopping */ /* create positive rsc_to_node constraints between resources and the nodes they are running on */ /* anything else? 
*/ gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler) { xmlNode *state = NULL; crm_trace("Beginning unpack"); if (scheduler->tickets == NULL) { scheduler->tickets = pcmk__strkey_table(free, destroy_ticket); } for (state = pcmk__xe_first_child(status); state != NULL; state = pcmk__xe_next(state)) { if (pcmk__xe_is(state, PCMK_XE_TICKETS)) { unpack_tickets_state((xmlNode *) state, scheduler); } else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) { unpack_node_state(state, scheduler); } } while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) { crm_trace("Another pass through node resource histories is needed"); } // Now catch any nodes we didn't see unpack_node_history(status, pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled), scheduler); /* Now that we know where resources are, we can schedule stops of containers * with failed bundle connections */ if (scheduler->stop_needed != NULL) { for (GList *item = scheduler->stop_needed; item; item = item->next) { pcmk_resource_t *container = item->data; pcmk_node_t *node = pcmk__current_node(container); if (node) { stop_action(container, node, FALSE); } } g_list_free(scheduler->stop_needed); scheduler->stop_needed = NULL; } /* Now that we know status of all Pacemaker Remote connections and nodes, * we can stop connections for node shutdowns, and check the online status * of remote/guest nodes that didn't have any node history to unpack. */ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *this_node = gIter->data; if (!pcmk__is_pacemaker_remote_node(this_node)) { continue; } if (this_node->details->shutdown && (this_node->details->remote_rsc != NULL)) { pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped, "remote shutdown"); } if (!this_node->details->unpacked) { determine_remote_online_status(scheduler, this_node); } } return TRUE; } /*! * \internal * \brief Unpack node's time when it became a member at the cluster layer * * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry * \param[in,out] scheduler Scheduler data * * \return Epoch time when node became a cluster member * (or scheduler effective time for legacy entries) if a member, * 0 if not a member, or -1 if no valid information available */ static long long unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler) { const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM); int member = 0; if (member_time == NULL) { return -1LL; } else if (crm_str_to_boolean(member_time, &member) == 1) { /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was * recorded as a boolean for a DC < 2.1.7, or the node is pending * shutdown and has left the CPG, in which case it was set to 1 to avoid * fencing for PCMK_OPT_NODE_PENDING_TIMEOUT. * * We return the effective time for in_ccm=1 because what's important to * avoid fencing is that effective time minus this value is less than * the pending node timeout. */ return member? (long long) get_effective_time(scheduler) : 0LL; } else { long long when_member = 0LL; if ((pcmk__scan_ll(member_time, &when_member, 0LL) != pcmk_rc_ok) || (when_member < 0LL)) { crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM " in " PCMK__XE_NODE_STATE " entry", member_time); return -1LL; } return when_member; } } /*! 
* \internal * \brief Unpack node's time when it became online in process group * * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry * * \return Epoch time when node became online in process group (or 0 if not * online, or 1 for legacy online entries) */ static long long unpack_node_online(const xmlNode *node_state) { const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD); // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline" if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE, pcmk__str_casei|pcmk__str_null_matches)) { return 0LL; } else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) { return 1LL; } else { long long when_online = 0LL; if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok) || (when_online < 0)) { crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in " PCMK__XE_NODE_STATE " entry, assuming offline", peer_time); return 0LL; } return when_online; } } /*! * \internal * \brief Unpack node attribute for user-requested fencing * * \param[in] node Node to check * \param[in] node_state Node's \c PCMK__XE_NODE_STATE entry in CIB status * * \return \c true if fencing has been requested for \p node, otherwise \c false */ static bool unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state) { long long value = 0LL; int value_i = 0; const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE, NULL, pcmk__rsc_node_current); // Value may be boolean or an epoch time if (crm_str_to_boolean(value_s, &value_i) == 1) { return (value_i != 0); } if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) { return (value > 0); } crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE "node attribute for %s", value_s, pcmk__node_name(node)); return false; } static gboolean determine_online_status_no_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { gboolean online = FALSE; const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); if (when_member <= 0) { crm_trace("Node %s is %sdown", pcmk__node_name(this_node), ((when_member < 0)? "presumed " : "")); } else if (when_online > 0) { if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { online = TRUE; } else { crm_debug("Node %s is not ready to run resources: %s", pcmk__node_name(this_node), join); } } else if (this_node->details->expected_up == FALSE) { crm_trace("Node %s controller is down: " "member@%lld online@%lld join=%s expected=%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } else { /* mark it unclean */ pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE); crm_info("Node %s member@%lld online@%lld join=%s expected=%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, "")); } return online; } /*! 
* \internal * \brief Check whether a node has taken too long to join controller group * * \param[in,out] scheduler Scheduler data * \param[in] node Node to check * \param[in] when_member Epoch time when node became a cluster member * \param[in] when_online Epoch time when node joined controller group * * \return true if node has been pending (on the way up) longer than * \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false * \note This will also update the cluster's recheck time if appropriate. */ static inline bool pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, long long when_member, long long when_online) { if ((scheduler->node_pending_timeout > 0) && (when_member > 0) && (when_online <= 0)) { // There is a timeout on pending nodes, and node is pending time_t timeout = when_member + scheduler->node_pending_timeout; if (get_effective_time(node->details->data_set) >= timeout) { return true; // Node has timed out } // Node is pending, but still has time pe__update_recheck_time(timeout, scheduler, "pending node timeout"); } return false; } static bool determine_online_status_fencing(pcmk_scheduler_t *scheduler, const xmlNode *node_state, pcmk_node_t *this_node) { bool termination_requested = unpack_node_terminate(this_node, node_state); const char *join = crm_element_value(node_state, PCMK__XA_JOIN); const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); long long when_member = unpack_node_member(node_state, scheduler); long long when_online = unpack_node_online(node_state); /* - PCMK__XA_JOIN ::= member|down|pending|banned - PCMK_XA_EXPECTED ::= member|down @COMPAT with entries recorded for DCs < 2.1.7 - PCMK__XA_IN_CCM ::= true|false - PCMK_XA_CRMD ::= online|offline Since crm_feature_set 3.18.0 (pacemaker-2.1.7): - PCMK__XA_IN_CCM ::= |0 Since when node has been a cluster member. A value 0 of means the node is not a cluster member. - PCMK_XA_CRMD ::= |0 Since when peer has been online in CPG. A value 0 means the peer is offline in CPG. */ crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s", pcmk__node_name(this_node), when_member, when_online, pcmk__s(join, ""), pcmk__s(exp_state, ""), (termination_requested? 
" (termination requested)" : "")); if (this_node->details->shutdown) { crm_debug("%s is shutting down", pcmk__node_name(this_node)); /* Slightly different criteria since we can't shut down a dead peer */ return (when_online > 0); } if (when_member < 0) { pe_fence_node(scheduler, this_node, "peer has not been seen by the cluster", FALSE); return false; } if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) { pe_fence_node(scheduler, this_node, "peer failed Pacemaker membership criteria", FALSE); } else if (termination_requested) { if ((when_member <= 0) && (when_online <= 0) && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) { crm_info("%s was fenced as requested", pcmk__node_name(this_node)); return false; } pe_fence_node(scheduler, this_node, "fencing was requested", false); } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_null_matches)) { if (pending_too_long(scheduler, this_node, when_member, when_online)) { pe_fence_node(scheduler, this_node, "peer pending timed out on joining the process group", FALSE); } else if ((when_member > 0) || (when_online > 0)) { crm_info("- %s is not ready to run resources", pcmk__node_name(this_node)); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { crm_trace("%s is down or still coming up", pcmk__node_name(this_node)); } } else if (when_member <= 0) { // Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes pe_fence_node(scheduler, this_node, "peer is no longer part of the cluster", TRUE); } else if (when_online <= 0) { pe_fence_node(scheduler, this_node, "peer process is no longer available", FALSE); /* Everything is running at this point, now check join state */ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) { crm_info("%s is active", pcmk__node_name(this_node)); } else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) { crm_info("%s is not ready to run resources", pcmk__node_name(this_node)); this_node->details->standby = TRUE; this_node->details->pending = TRUE; } else { pe_fence_node(scheduler, this_node, "peer was in an unknown state", FALSE); } return (when_member > 0); } static void determine_remote_online_status(pcmk_scheduler_t *scheduler, pcmk_node_t *this_node) { pcmk_resource_t *rsc = this_node->details->remote_rsc; pcmk_resource_t *container = NULL; pcmk_node_t *host = NULL; /* If there is a node state entry for a (former) Pacemaker Remote node * but no resource creating that node, the node's connection resource will * be NULL. Consider it an offline remote node in that case. */ if (rsc == NULL) { this_node->details->online = FALSE; goto remote_online_done; } container = rsc->container; if (container && pcmk__list_of_1(rsc->running_on)) { host = rsc->running_on->data; } /* If the resource is currently started, mark it online. */ if (rsc->role == pcmk_role_started) { crm_trace("%s node %s presumed ONLINE because connection resource is started", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = TRUE; } /* consider this node shutting down if transitioning start->stop */ if ((rsc->role == pcmk_role_started) && (rsc->next_role == pcmk_role_stopped)) { crm_trace("%s node %s shutting down because connection resource is stopping", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->shutdown = TRUE; } /* Now check all the failure conditions. 
*/ if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) { crm_trace("Guest node %s UNCLEAN because guest resource failed", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) { crm_trace("%s node %s OFFLINE because connection resource failed", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; } else if ((rsc->role == pcmk_role_stopped) || ((container != NULL) && (container->role == pcmk_role_stopped))) { crm_trace("%s node %s OFFLINE because its resource is stopped", (container? "Guest" : "Remote"), this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = FALSE; } else if (host && (host->details->online == FALSE) && host->details->unclean) { crm_trace("Guest node %s UNCLEAN because host is unclean", this_node->details->id); this_node->details->online = FALSE; this_node->details->remote_requires_reset = TRUE; } remote_online_done: crm_trace("Remote node %s online=%s", this_node->details->id, this_node->details->online ? "TRUE" : "FALSE"); } static void determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node, pcmk_scheduler_t *scheduler) { gboolean online = FALSE; const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED); CRM_CHECK(this_node != NULL, return); this_node->details->shutdown = FALSE; this_node->details->expected_up = FALSE; if (pe__shutdown_requested(this_node)) { this_node->details->shutdown = TRUE; } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) { this_node->details->expected_up = TRUE; } if (this_node->details->type == node_ping) { this_node->details->unclean = FALSE; online = FALSE; /* As far as resource management is concerned, * the node is safely offline. * Anyone caught abusing this logic will be shot */ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { online = determine_online_status_no_fencing(scheduler, node_state, this_node); } else { online = determine_online_status_fencing(scheduler, node_state, this_node); } if (online) { this_node->details->online = TRUE; } else { /* remove node from contention */ this_node->fixed = TRUE; // @COMPAT deprecated and unused this_node->weight = -PCMK_SCORE_INFINITY; } if (online && this_node->details->shutdown) { /* don't run resources here */ this_node->fixed = TRUE; // @COMPAT deprecated and unused this_node->weight = -PCMK_SCORE_INFINITY; } if (this_node->details->type == node_ping) { crm_info("%s is not a Pacemaker node", pcmk__node_name(this_node)); } else if (this_node->details->unclean) { pcmk__sched_warn("%s is unclean", pcmk__node_name(this_node)); } else if (this_node->details->online) { crm_info("%s is %s", pcmk__node_name(this_node), this_node->details->shutdown ? "shutting down" : this_node->details->pending ? "pending" : this_node->details->standby ? "standby" : this_node->details->maintenance ? "maintenance" : "online"); } else { crm_trace("%s is offline", pcmk__node_name(this_node)); } } /*! 
* \internal * \brief Find the end of a resource's name, excluding any clone suffix * * \param[in] id Resource ID to check * * \return Pointer to last character of resource's base name */ const char * pe_base_name_end(const char *id) { if (!pcmk__str_empty(id)) { const char *end = id + strlen(id) - 1; for (const char *s = end; s > id; --s) { switch (*s) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case ':': return (s == end)? s : (s - 1); default: return end; } } return end; } return NULL; } /*! * \internal * \brief Get a resource name excluding any clone suffix * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_strip(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); char *basename = NULL; CRM_ASSERT(end); basename = strndup(last_rsc_id, end - last_rsc_id + 1); CRM_ASSERT(basename); return basename; } /*! * \internal * \brief Get the name of the first instance of a cloned resource * * \param[in] last_rsc_id Resource ID to check * * \return Pointer to newly allocated string with resource's base name plus :0 * \note It is the caller's responsibility to free() the result. * This asserts on error, so callers can assume result is not NULL. */ char * clone_zero(const char *last_rsc_id) { const char *end = pe_base_name_end(last_rsc_id); size_t base_name_len = end - last_rsc_id + 1; char *zero = NULL; CRM_ASSERT(end); zero = calloc(base_name_len + 3, sizeof(char)); CRM_ASSERT(zero); memcpy(zero, last_rsc_id, base_name_len); zero[base_name_len] = ':'; zero[base_name_len + 1] = '0'; return zero; } static pcmk_resource_t * create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; xmlNode *xml_rsc = create_xml_node(NULL, PCMK_XE_PRIMITIVE); copy_in_properties(xml_rsc, rsc_entry); crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id); crm_log_xml_debug(xml_rsc, "Orphan resource"); if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) { return NULL; } if (xml_contains_remote_node(xml_rsc)) { pcmk_node_t *node; crm_debug("Detected orphaned remote node %s", rsc_id); node = pe_find_node(scheduler->nodes, rsc_id); if (node == NULL) { node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, NULL, scheduler); } link_rsc2remotenode(scheduler, rsc); if (node) { crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id); node->details->shutdown = TRUE; } } if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) { /* This orphaned rsc needs to be mapped to a container. */ crm_trace("Detected orphaned container filler %s", rsc_id); pcmk__set_rsc_flags(rsc, pcmk_rsc_removed_filler); } pcmk__set_rsc_flags(rsc, pcmk_rsc_removed); scheduler->resources = g_list_append(scheduler->resources, rsc); return rsc; } /*! 
* \internal * \brief Create orphan instance for anonymous clone resource history * * \param[in,out] parent Clone resource that orphan will be added to * \param[in] rsc_id Orphan's resource ID * \param[in] node Where orphan is active (for logging only) * \param[in,out] scheduler Scheduler data * * \return Newly added orphaned instance of \p parent */ static pcmk_resource_t * create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *top = pe__create_clone_child(parent, scheduler); // find_rsc() because we might be a cloned group pcmk_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pcmk_rsc_match_clone_only); pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s", top->id, parent->id, rsc_id, pcmk__node_name(node)); return orphan; } /*! * \internal * \brief Check a node for an instance of an anonymous clone * * Return a child instance of the specified anonymous clone, in order of * preference: (1) the instance running on the specified node, if any; * (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX * instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX * instances are already active). * * \param[in,out] scheduler Scheduler data * \param[in] node Node on which to check for instance * \param[in,out] parent Clone to check * \param[in] rsc_id Name of cloned resource in history (no instance) */ static pcmk_resource_t * find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, pcmk_resource_t *parent, const char *rsc_id) { GList *rIter = NULL; pcmk_resource_t *rsc = NULL; pcmk_resource_t *inactive_instance = NULL; gboolean skip_inactive = FALSE; CRM_ASSERT(parent != NULL); CRM_ASSERT(pcmk__is_clone(parent)); CRM_ASSERT(!pcmk_is_set(parent->flags, pcmk_rsc_unique)); // Check for active (or partially active, for cloned groups) instance pcmk__rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, pcmk__node_name(node), parent->id); for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) { GList *locations = NULL; pcmk_resource_t *child = rIter->data; /* Check whether this instance is already known to be active or pending * anywhere, at this stage of unpacking. Because this function is called * for a resource before the resource's individual operation history * entries are unpacked, locations will generally not contain the * desired node. * * However, there are three exceptions: * (1) when child is a cloned group and we have already unpacked the * history of another member of the group on the same node; * (2) when we've already unpacked the history of another numbered * instance on the same node (which can happen if * PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and * (3) when we re-run calculations on the same scheduler data as part of * a simulation. */ child->fns->location(child, &locations, 2); if (locations) { /* We should never associate the same numbered anonymous clone * instance with multiple nodes, and clone instances can't migrate, * so there must be only one location, regardless of history. */ CRM_LOG_ASSERT(locations->next == NULL); if (pcmk__same_node((pcmk_node_t *) locations->data, node)) { /* This child instance is active on the requested node, so check * for a corresponding configured resource. We use find_rsc() * instead of child because child may be a cloned group, and we * need the particular member corresponding to rsc_id. * * If the history entry is orphaned, rsc will be NULL. 
*/ rsc = parent->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); if (rsc) { /* If there are multiple instance history entries for an * anonymous clone in a single node's history (which can * happen if PCMK_META_GLOBALLY_UNIQUE is switched from true * to false), we want to consider the instances beyond the * first as orphans, even if there are inactive instance * numbers available. */ if (rsc->running_on) { crm_notice("Active (now-)anonymous clone %s has " "multiple (orphan) instance histories on %s", parent->id, pcmk__node_name(node)); skip_inactive = TRUE; rsc = NULL; } else { pcmk__rsc_trace(parent, "Resource %s, active", rsc->id); } } } g_list_free(locations); } else { pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id); if (!skip_inactive && !inactive_instance && !pcmk_is_set(child->flags, pcmk_rsc_blocked)) { // Remember one inactive instance in case we don't find active inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL, pcmk_rsc_match_clone_only); /* ... but don't use it if it was already associated with a * pending action on another node */ if ((inactive_instance != NULL) && (inactive_instance->pending_node != NULL) && !pcmk__same_node(inactive_instance->pending_node, node)) { inactive_instance = NULL; } } } } if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) { pcmk__rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id); rsc = inactive_instance; } /* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or * PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we * don't want to consume a valid instance number for unclean nodes. Such * instances may appear to be active according to the history, but should be * considered inactive, so we can start an instance elsewhere. Treat such * instances as orphans. * * An exception is instances running on guest nodes -- since guest node * "fencing" is actually just a resource stop, requires shouldn't apply. * * @TODO Ideally, we'd use an inactive instance number if it is not needed * for any clean instances. However, we don't know that at this point. */ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing) && (!node->details->online || node->details->unclean) && !pcmk__is_guest_or_bundle_node(node) && !pe__is_universal_clone(parent, scheduler)) { rsc = NULL; } if (rsc == NULL) { rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler); pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id); } return rsc; } static pcmk_resource_t * unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node, const char *rsc_id) { pcmk_resource_t *rsc = NULL; pcmk_resource_t *parent = NULL; crm_trace("looking for %s", rsc_id); rsc = pe_find_resource(scheduler->resources, rsc_id); if (rsc == NULL) { /* If we didn't find the resource by its name in the operation history, * check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0, * we create a single :0 orphan to match against here. 
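* For example, with an (illustrative) anonymous clone named "myclone", a
* history entry recorded simply as "myclone" would be looked up again as
* "myclone:0" here.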
*/ char *clone0_id = clone_zero(rsc_id); pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources, clone0_id); if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) { rsc = clone0; parent = uber_parent(clone0); crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id); } else { crm_trace("%s is not known as %s either (orphan)", rsc_id, clone0_id); } free(clone0_id); } else if (rsc->variant > pcmk_rsc_variant_primitive) { crm_trace("Resource history for %s is orphaned because it is no longer primitive", rsc_id); return NULL; } else { parent = uber_parent(rsc); } if (pcmk__is_anonymous_clone(parent)) { if (pcmk__is_bundled(parent)) { rsc = pe__find_bundle_replica(parent->parent, node); } else { char *base = clone_strip(rsc_id); rsc = find_anonymous_clone(scheduler, node, parent, base); free(base); CRM_ASSERT(rsc != NULL); } } if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_casei) && !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_casei)) { pcmk__str_update(&rsc->clone_name, rsc_id); pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s", rsc_id, pcmk__node_name(node), rsc->id, pcmk_is_set(rsc->flags, pcmk_rsc_removed)? " (ORPHAN)" : ""); } return rsc; } static pcmk_resource_t * process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { pcmk_resource_t *rsc = NULL; const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID); crm_debug("Detected orphan resource %s on %s", rsc_id, pcmk__node_name(node)); rsc = create_fake_resource(rsc_id, rsc_entry, scheduler); if (rsc == NULL) { return NULL; } if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) { pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed); } else { CRM_CHECK(rsc != NULL, return NULL); pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id); resource_location(rsc, NULL, -PCMK_SCORE_INFINITY, "__orphan_do_not_run__", scheduler); } return rsc; } static void process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node, enum action_fail_response on_fail) { pcmk_node_t *tmpnode = NULL; char *reason = NULL; enum action_fail_response save_on_fail = pcmk_on_fail_ignore; CRM_ASSERT(rsc); pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s", rsc->id, pcmk_role_text(rsc->role), pcmk__node_name(node), pcmk_on_fail_text(on_fail)); /* process current state */ if (rsc->role != pcmk_role_unknown) { pcmk_resource_t *iter = rsc; while (iter) { if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) { pcmk_node_t *n = pe__copy_node(node); pcmk__rsc_trace(rsc, "%s%s%s known on %s", rsc->id, ((rsc->clone_name == NULL)? "" : " also known as "), ((rsc->clone_name == NULL)? "" : rsc->clone_name), pcmk__node_name(n)); g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n); } if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) { break; } iter = iter->parent; } } /* If a managed resource is believed to be running, but node is down ... */ if ((rsc->role > pcmk_role_stopped) && node->details->online == FALSE && node->details->maintenance == FALSE && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { gboolean should_fence = FALSE; /* If this is a guest node, fence it (regardless of whether fencing is * enabled, because guest node fencing is done by recovery of the * container resource rather than by the fencer). Mark the resource * we're processing as failed. When the guest comes back up, its * operation history in the CIB will be cleared, freeing the affected * resource to run again once we are sure we know its state. 
*/ if (pcmk__is_guest_or_bundle_node(node)) { pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); should_fence = TRUE; } else if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) { if (pcmk__is_remote_node(node) && (node->details->remote_rsc != NULL) && !pcmk_is_set(node->details->remote_rsc->flags, pcmk_rsc_failed)) { /* Setting unseen means that fencing of the remote node will * occur only if the connection resource is not going to start * somewhere. This allows connection resources on a failed * cluster node to move to another node without requiring the * remote nodes to be fenced as well. */ node->details->unseen = TRUE; reason = crm_strdup_printf("%s is active there (fencing will be" " revoked if remote connection can " "be re-established elsewhere)", rsc->id); } should_fence = TRUE; } if (should_fence) { if (reason == NULL) { reason = crm_strdup_printf("%s is thought to be active there", rsc->id); } pe_fence_node(rsc->cluster, node, reason, FALSE); } free(reason); } /* In order to calculate priority_fencing_delay correctly, save the failure information and pass it to native_add_running(). */ save_on_fail = on_fail; if (node->details->unclean) { /* No extra processing needed * Also allows resources to be started again after a node is shot */ on_fail = pcmk_on_fail_ignore; } switch (on_fail) { case pcmk_on_fail_ignore: /* nothing to do */ break; case pcmk_on_fail_demote: pcmk__set_rsc_flags(rsc, pcmk_rsc_failed); demote_action(rsc, node, FALSE); break; case pcmk_on_fail_fence_node: /* treat it as if it is still running * but also mark the node as unclean */ reason = crm_strdup_printf("%s failed there", rsc->id); pe_fence_node(rsc->cluster, node, reason, FALSE); free(reason); break; case pcmk_on_fail_standby_node: node->details->standby = TRUE; node->details->standby_onfail = TRUE; break; case pcmk_on_fail_block: /* is_managed == FALSE will prevent any * actions being sent for the resource */ pcmk__clear_rsc_flags(rsc, pcmk_rsc_managed); pcmk__set_rsc_flags(rsc, pcmk_rsc_blocked); break; case pcmk_on_fail_ban: /* make sure it comes up somewhere else * or not at all */ resource_location(rsc, node, -PCMK_SCORE_INFINITY, "__action_migration_auto__", rsc->cluster); break; case pcmk_on_fail_stop: pe__set_next_role(rsc, pcmk_role_stopped, PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP); break; case pcmk_on_fail_restart: if ((rsc->role != pcmk_role_stopped) && (rsc->role != pcmk_role_unknown)) { pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); stop_action(rsc, node, FALSE); } break; case pcmk_on_fail_restart_container: pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); if ((rsc->container != NULL) && pcmk__is_bundled(rsc)) { /* A bundle's remote connection can run on a different node than * the bundle's container. We don't necessarily know where the * container is running yet, so remember it and add a stop * action for it later. 
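* (These deferred stops are scheduled later, in unpack_status(), once each
* resource's current node is known.)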
*/ rsc->cluster->stop_needed = g_list_prepend(rsc->cluster->stop_needed, rsc->container); } else if (rsc->container) { stop_action(rsc->container, node, FALSE); } else if ((rsc->role != pcmk_role_stopped) && (rsc->role != pcmk_role_unknown)) { stop_action(rsc, node, FALSE); } break; case pcmk_on_fail_reset_remote: pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) { tmpnode = NULL; if (rsc->is_remote_node) { tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id); } if (pcmk__is_remote_node(tmpnode) && !(tmpnode->details->remote_was_fenced)) { /* The remote connection resource failed in a way that * should result in fencing the remote node. */ pe_fence_node(rsc->cluster, tmpnode, "remote connection is unrecoverable", FALSE); } } /* require the stop action regardless if fencing is occurring or not. */ if (rsc->role > pcmk_role_stopped) { stop_action(rsc, node, FALSE); } /* if reconnect delay is in use, prevent the connection from exiting the * "STOPPED" role until the failure is cleared by the delay timeout. */ if (rsc->remote_reconnect_ms) { pe__set_next_role(rsc, pcmk_role_stopped, "remote reset"); } break; } /* ensure a remote-node connection failure forces an unclean remote-node * to be fenced. By setting unseen = FALSE, the remote-node failure will * result in a fencing operation regardless if we're going to attempt to * reconnect to the remote-node in this transition or not. */ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) { tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id); if (tmpnode && tmpnode->details->unclean) { tmpnode->details->unseen = FALSE; } } if ((rsc->role != pcmk_role_stopped) && (rsc->role != pcmk_role_unknown)) { if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) { if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { pcmk__config_warn("Detected active orphan %s running on %s", rsc->id, pcmk__node_name(node)); } else { pcmk__config_warn("Resource '%s' must be stopped manually on " "%s because cluster is configured not to " "stop active orphans", rsc->id, pcmk__node_name(node)); } } native_add_running(rsc, node, rsc->cluster, (save_on_fail != pcmk_on_fail_ignore)); switch (on_fail) { case pcmk_on_fail_ignore: break; case pcmk_on_fail_demote: case pcmk_on_fail_block: pcmk__set_rsc_flags(rsc, pcmk_rsc_failed); break; default: pcmk__set_rsc_flags(rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); break; } } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { /* Only do this for older status sections that included instance numbers * Otherwise stopped instances will appear as orphans */ pcmk__rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id); free(rsc->clone_name); rsc->clone_name = NULL; } else { GList *possible_matches = pe__resource_actions(rsc, node, PCMK_ACTION_STOP, FALSE); GList *gIter = possible_matches; for (; gIter != NULL; gIter = gIter->next) { pcmk_action_t *stop = (pcmk_action_t *) gIter->data; pcmk__set_action_flags(stop, pcmk_action_optional); } g_list_free(possible_matches); } /* A successful stop after migrate_to on the migration source doesn't make * the partially migrated resource stopped on the migration target. 
*/ if ((rsc->role == pcmk_role_stopped) && rsc->partial_migration_source && rsc->partial_migration_source->details == node->details && rsc->partial_migration_target && rsc->running_on) { rsc->role = pcmk_role_started; } } /* create active recurring operations as optional */ static void process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc, int start_index, int stop_index, GList *sorted_op_list, pcmk_scheduler_t *scheduler) { int counter = -1; const char *task = NULL; const char *status = NULL; GList *gIter = sorted_op_list; CRM_ASSERT(rsc); pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index); for (; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; guint interval_ms = 0; char *key = NULL; const char *id = pcmk__xe_id(rsc_op); counter++; if (node->details->online == FALSE) { pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline", rsc->id, pcmk__node_name(node)); break; /* Need to check if there's a monitor for role="Stopped" */ } else if (start_index < stop_index && counter <= stop_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active", id, pcmk__node_name(node)); continue; } else if (counter < start_index) { pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d", id, pcmk__node_name(node), counter); continue; } crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms); if (interval_ms == 0) { pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring", id, pcmk__node_name(node)); continue; } status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS); if (pcmk__str_eq(status, "-1", pcmk__str_casei)) { pcmk__rsc_trace(rsc, "Skipping %s on %s: status", id, pcmk__node_name(node)); continue; } task = crm_element_value(rsc_op, PCMK_XA_OPERATION); /* create the action */ key = pcmk__op_key(rsc->id, task, interval_ms); pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node)); custom_action(rsc, key, task, node, TRUE, scheduler); } } void calculate_active_ops(const GList *sorted_op_list, int *start_index, int *stop_index) { int counter = -1; int implied_monitor_start = -1; int implied_clone_start = -1; const char *task = NULL; const char *status = NULL; *stop_index = -1; *start_index = -1; for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) { const xmlNode *rsc_op = (const xmlNode *) iter->data; counter++; task = crm_element_value(rsc_op, PCMK_XA_OPERATION); status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS); if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei) && pcmk__str_eq(status, "0", pcmk__str_casei)) { *stop_index = counter; } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MIGRATE_FROM, NULL)) { *start_index = counter; } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE); if (pcmk__strcase_any_of(rc, "0", "8", NULL)) { implied_monitor_start = counter; } } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, NULL)) { implied_clone_start = counter; } } if (*start_index == -1) { if (implied_clone_start != -1) { *start_index = implied_clone_start; } else if (implied_monitor_start != -1) { *start_index = implied_monitor_start; } } } // If resource history entry has shutdown lock, remember lock node and time static void unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler) { time_t lock_time = 0; // When lock started (i.e. 
node shutdown time) if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK, &lock_time) == pcmk_ok) && (lock_time != 0)) { if ((scheduler->shutdown_lock > 0) && (get_effective_time(scheduler) > (lock_time + scheduler->shutdown_lock))) { pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired", rsc->id, pcmk__node_name(node)); pe__clear_resource_history(rsc, node); } else { /* @COMPAT I don't like breaking const signatures, but * rsc->lock_node should really be const -- we just can't change it * until the next API compatibility break. */ rsc->lock_node = (pcmk_node_t *) node; rsc->lock_time = lock_time; } } } /*! * \internal * \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status * * \param[in,out] node Node whose status is being unpacked * \param[in] rsc_entry \c PCMK__XE_LRM_RESOURCE XML being unpacked * \param[in,out] scheduler Scheduler data * * \return Resource corresponding to the entry, or NULL if no operation history */ static pcmk_resource_t * unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource, pcmk_scheduler_t *scheduler) { GList *gIter = NULL; int stop_index = -1; int start_index = -1; enum rsc_role_e req_role = pcmk_role_unknown; const char *rsc_id = pcmk__xe_id(lrm_resource); pcmk_resource_t *rsc = NULL; GList *op_list = NULL; GList *sorted_op_list = NULL; xmlNode *rsc_op = NULL; xmlNode *last_failure = NULL; enum action_fail_response on_fail = pcmk_on_fail_ignore; enum rsc_role_e saved_role = pcmk_role_unknown; if (rsc_id == NULL) { pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE " entry: No " PCMK_XA_ID); crm_log_xml_info(lrm_resource, "missing-id"); return NULL; } crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s", rsc_id, pcmk__node_name(node)); /* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort * them */ for (rsc_op = first_named_child(lrm_resource, PCMK__XE_LRM_RSC_OP); rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) { op_list = g_list_prepend(op_list, rsc_op); } if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } } /* find the resource */ rsc = unpack_find_resource(scheduler, node, rsc_id); if (rsc == NULL) { if (op_list == NULL) { // If there are no operations, there is nothing to do return NULL; } else { rsc = process_orphan_resource(lrm_resource, node, scheduler); } } CRM_ASSERT(rsc != NULL); // Check whether the resource is "shutdown-locked" to this node if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) { unpack_shutdown_lock(lrm_resource, rsc, node, scheduler); } /* process operations */ saved_role = rsc->role; rsc->role = pcmk_role_unknown; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail); } /* create active recurring operations as optional */ calculate_active_ops(sorted_op_list, &start_index, &stop_index); process_recurring(node, rsc, start_index, stop_index, sorted_op_list, scheduler); /* no need to free the contents */ g_list_free(sorted_op_list); process_rsc_state(rsc, node, on_fail); if (get_target_role(rsc, &req_role)) { if ((rsc->next_role == pcmk_role_unknown) || (req_role < rsc->next_role)) { pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE); } else if (req_role > rsc->next_role) { pcmk__rsc_info(rsc, "%s: Not overwriting calculated next role %s" " with requested next role 
%s", rsc->id, pcmk_role_text(rsc->next_role), pcmk_role_text(req_role)); } } if (saved_role > rsc->role) { rsc->role = saved_role; } return rsc; } static void handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list, pcmk_scheduler_t *scheduler) { for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) { pcmk_resource_t *rsc; pcmk_resource_t *container; const char *rsc_id; const char *container_id; if (!pcmk__xe_is(rsc_entry, PCMK__XE_LRM_RESOURCE)) { continue; } container_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER); rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID); if (container_id == NULL || rsc_id == NULL) { continue; } container = pe_find_resource(scheduler->resources, container_id); if (container == NULL) { continue; } rsc = pe_find_resource(scheduler->resources, rsc_id); if ((rsc == NULL) || (rsc->container != NULL) || !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) { continue; } pcmk__rsc_trace(rsc, "Mapped container of orphaned resource %s to %s", rsc->id, container_id); rsc->container = container; container->fillers = g_list_append(container->fillers, rsc); } } /*! * \internal * \brief Unpack one node's lrm status section * * \param[in,out] node Node whose status is being unpacked * \param[in] xml CIB node state XML * \param[in,out] scheduler Scheduler data */ static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml, pcmk_scheduler_t *scheduler) { bool found_orphaned_container_filler = false; // Drill down to PCMK__XE_LRM_RESOURCES section xml = find_xml_node(xml, PCMK__XE_LRM, FALSE); if (xml == NULL) { return; } xml = find_xml_node(xml, PCMK__XE_LRM_RESOURCES, FALSE); if (xml == NULL) { return; } // Unpack each PCMK__XE_LRM_RESOURCE entry for (const xmlNode *rsc_entry = first_named_child(xml, PCMK__XE_LRM_RESOURCE); rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) { pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler); if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) { found_orphaned_container_filler = true; } } /* Now that all resource state has been unpacked for this node, map any * orphaned container fillers to their container resource. */ if (found_orphaned_container_filler) { handle_orphaned_container_fillers(xml, scheduler); } } static void set_active(pcmk_resource_t *rsc) { const pcmk_resource_t *top = pe__const_top_resource(rsc, false); if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) { rsc->role = pcmk_role_unpromoted; } else { rsc->role = pcmk_role_started; } } static void set_node_score(gpointer key, gpointer value, gpointer user_data) { pcmk_node_t *node = value; int *score = user_data; node->weight = *score; } #define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \ "/" PCMK__XE_NODE_STATE #define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM \ "/" PCMK__XE_LRM_RESOURCES \ "/" PCMK__XE_LRM_RESOURCE #define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP static xmlNode * find_lrm_op(const char *resource, const char *op, const char *node, const char *source, int target_rc, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']" SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'", NULL); /* Need to check against transition_magic too? 
*/ if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) { pcmk__g_strcat(xpath, " and @" PCMK__META_MIGRATE_TARGET "='", source, "']", NULL); } else if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) { pcmk__g_strcat(xpath, " and @" PCMK__META_MIGRATE_SOURCE "='", source, "']", NULL); } else { g_string_append_c(xpath, ']'); } xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); if (xml && target_rc >= 0) { int rc = PCMK_OCF_UNKNOWN_ERROR; int status = PCMK_EXEC_ERROR; crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc); crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status); if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) { return NULL; } } return xml; } static xmlNode * find_lrm_resource(const char *rsc_id, const char *node_name, pcmk_scheduler_t *scheduler) { GString *xpath = NULL; xmlNode *xml = NULL; CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL); xpath = g_string_sized_new(256); pcmk__g_strcat(xpath, XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']", NULL); xml = get_xpath_object((const char *) xpath->str, scheduler->input, LOG_DEBUG); g_string_free(xpath, TRUE); return xml; } /*! * \internal * \brief Check whether a resource has no completed action history on a node * * \param[in,out] rsc Resource to check * \param[in] node_name Node to check * * \return true if \p rsc_id is unknown on \p node_name, otherwise false */ static bool unknown_on_node(pcmk_resource_t *rsc, const char *node_name) { bool result = false; xmlXPathObjectPtr search; char *xpath = NULL; xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" SUB_XPATH_LRM_RSC_OP "[@" PCMK__XA_RC_CODE "!='%d']", node_name, rsc->id, PCMK_OCF_UNKNOWN); search = xpath_search(rsc->cluster->input, xpath); result = (numXpathResults(search) == 0); freeXpathObject(search); free(xpath); return result; } /*! * \brief Check whether a probe/monitor indicating the resource was not running * on a node happened after some event * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] xml_op Event that monitor is being compared to * \param[in] same_node Whether the operations are on the same node * \param[in,out] scheduler Scheduler data * * \return true if such a monitor happened after event, false otherwise */ static bool monitor_not_running_after(const char *rsc_id, const char *node_name, const xmlNode *xml_op, bool same_node, pcmk_scheduler_t *scheduler) { /* Any probe/monitor operation on the node indicating it was not running * there */ xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name, NULL, PCMK_OCF_NOT_RUNNING, scheduler); return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0); } /*! 
* \brief Check whether any non-monitor operation on a node happened after some * event * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] xml_op Event that non-monitor is being compared to * \param[in] same_node Whether the operations are on the same node * \param[in,out] scheduler Scheduler data * * \return true if such a operation happened after event, false otherwise */ static bool non_monitor_after(const char *rsc_id, const char *node_name, const xmlNode *xml_op, bool same_node, pcmk_scheduler_t *scheduler) { xmlNode *lrm_resource = NULL; lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler); if (lrm_resource == NULL) { return false; } for (xmlNode *op = first_named_child(lrm_resource, PCMK__XE_LRM_RSC_OP); op != NULL; op = crm_next_same_xml(op)) { const char * task = NULL; if (op == xml_op) { continue; } task = crm_element_value(op, PCMK_XA_OPERATION); if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, NULL) && pe__is_newer_op(op, xml_op, same_node) > 0) { return true; } } return false; } /*! * \brief Check whether the resource has newer state on a node after a migration * attempt * * \param[in] rsc_id Resource being checked * \param[in] node_name Node being checked * \param[in] migrate_to Any migrate_to event that is being compared to * \param[in] migrate_from Any migrate_from event that is being compared to * \param[in,out] scheduler Scheduler data * * \return true if such a operation happened after event, false otherwise */ static bool newer_state_after_migrate(const char *rsc_id, const char *node_name, const xmlNode *migrate_to, const xmlNode *migrate_from, pcmk_scheduler_t *scheduler) { const xmlNode *xml_op = migrate_to; const char *source = NULL; const char *target = NULL; bool same_node = false; if (migrate_from) { xml_op = migrate_from; } source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE); target = crm_element_value(xml_op, PCMK__META_MIGRATE_TARGET); /* It's preferred to compare to the migrate event on the same node if * existing, since call ids are more reliable. */ if (pcmk__str_eq(node_name, target, pcmk__str_casei)) { if (migrate_from) { xml_op = migrate_from; same_node = true; } else { xml_op = migrate_to; } } else if (pcmk__str_eq(node_name, source, pcmk__str_casei)) { if (migrate_to) { xml_op = migrate_to; same_node = true; } else { xml_op = migrate_from; } } /* If there's any newer non-monitor operation on the node, or any newer * probe/monitor operation on the node indicating it was not running there, * the migration events potentially no longer matter for the node. */ return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler) || monitor_not_running_after(rsc_id, node_name, xml_op, same_node, scheduler); } /*! 
* \internal * \brief Parse migration source and target node names from history entry * * \param[in] entry Resource history entry for a migration action * \param[in] source_node If not NULL, source must match this node * \param[in] target_node If not NULL, target must match this node * \param[out] source_name Where to store migration source node name * \param[out] target_name Where to store migration target node name * * \return Standard Pacemaker return code */ static int get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node, const pcmk_node_t *target_node, const char **source_name, const char **target_name) { *source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE); *target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET); if ((*source_name == NULL) || (*target_name == NULL)) { pcmk__config_err("Ignoring resource history entry %s without " PCMK__META_MIGRATE_SOURCE " and " PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry)); return pcmk_rc_unpack_error; } if ((source_node != NULL) && !pcmk__str_eq(*source_name, source_node->details->uname, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " PCMK__META_MIGRATE_SOURCE "='%s' does not match %s", pcmk__xe_id(entry), *source_name, pcmk__node_name(source_node)); return pcmk_rc_unpack_error; } if ((target_node != NULL) && !pcmk__str_eq(*target_name, target_node->details->uname, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__config_err("Ignoring resource history entry %s because " PCMK__META_MIGRATE_TARGET "='%s' does not match %s", pcmk__xe_id(entry), *target_name, pcmk__node_name(target_node)); return pcmk_rc_unpack_error; } return pcmk_rc_ok; } /* * \internal * \brief Add a migration source to a resource's list of dangling migrations * * If the migrate_to and migrate_from actions in a live migration both * succeeded, but there is no stop on the source, the migration is considered * "dangling." Add the source to the resource's dangling migration list, which * will be used to schedule a stop on the source without affecting the target. * * \param[in,out] rsc Resource involved in migration * \param[in] node Migration source */ static void add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node) { pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s", rsc->id, pcmk__node_name(node)); rsc->role = pcmk_role_stopped; rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, (gpointer) node); } /*! * \internal * \brief Update resource role etc. after a successful migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_success(struct action_history *history) { /* A complete migration sequence is: * 1. migrate_to on source node (which succeeded if we get to this function) * 2. migrate_from on target node * 3. stop on source node * * If no migrate_from has happened, the migration is considered to be * "partial". If the migrate_from succeeded but no stop has happened, the * migration is considered to be "dangling". * * If a successful migrate_to and stop have happened on the source node, we * still need to check for a partial migration, due to scenarios (easier to * produce with batch-limit=1) like: * * - A resource is migrating from node1 to node2, and a migrate_to is * initiated for it on node1. * * - node2 goes into standby mode while the migrate_to is pending, which * aborts the transition. 
* * - Upon completion of the migrate_to, a new transition schedules a stop * on both nodes and a start on node1. * * - If the new transition is aborted for any reason while the resource is * stopping on node1, the transition after that stop completes will see * the migrate_to and stop on the source, but it's still a partial * migration, and the resource must be stopped on node2 because it is * potentially active there due to the migrate_to. * * We also need to take into account that either node's history may be * cleared at any point in the migration process. */ int from_rc = PCMK_OCF_OK; int from_status = PCMK_EXEC_PENDING; pcmk_node_t *target_node = NULL; xmlNode *migrate_from = NULL; const char *source = NULL; const char *target = NULL; bool source_newer_op = false; bool target_newer_state = false; bool active_on_target = false; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } // Check for newer state on the source source_newer_op = non_monitor_after(history->rsc->id, source, history->xml, true, history->rsc->cluster); // Check for a migrate_from action from this source on the target migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, -1, history->rsc->cluster); if (migrate_from != NULL) { if (source_newer_op) { /* There's a newer non-monitor operation on the source and a * migrate_from on the target, so this migrate_to is irrelevant to * the resource's state. */ return; } crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc); crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status); } /* If the resource has newer state on both the source and target after the * migration events, this migrate_to is irrelevant to the resource's state. */ target_newer_state = newer_state_after_migrate(history->rsc->id, target, history->xml, migrate_from, history->rsc->cluster); if (source_newer_op && target_newer_state) { return; } /* Check for dangling migration (migrate_from succeeded but stop not done). * We know there's no stop because we already returned if the target has a * migrate_from and the source has any newer non-monitor operation. */ if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) { add_dangling_migration(history->rsc, history->node); return; } /* Without newer state, this migrate_to implies the resource is active. * (Clones are not allowed to migrate, so role can't be promoted.) */ history->rsc->role = pcmk_role_started; target_node = pe_find_node(history->rsc->cluster->nodes, target); active_on_target = !target_newer_state && (target_node != NULL) && target_node->details->online; if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target if (active_on_target) { native_add_running(history->rsc, target_node, history->rsc->cluster, TRUE); } else { // Mark resource as failed, require recovery, and prevent migration pcmk__set_rsc_flags(history->rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable); } return; } // The migrate_from is pending, complete but erased, or to be scheduled /* If there is no history at all for the resource on an online target, then * it was likely cleaned. Just return, and we'll schedule a probe. Once we * have the probe result, it will be reflected in target_newer_state. 
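 *
 * For example (names illustrative): if vm1's migrate_to from node1 to node2
 * succeeded but vm1's operation history on node2 was later cleaned (say, via
 * crm_resource --cleanup), there is no lrm_resource entry for vm1 on node2.
 * Rather than guess, return here and let the probe scheduled for node2
 * report whether vm1 is actually active there.
 *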
*/ if ((target_node != NULL) && target_node->details->online && unknown_on_node(history->rsc, target)) { return; } if (active_on_target) { pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes, source); native_add_running(history->rsc, target_node, history->rsc->cluster, FALSE); if ((source_node != NULL) && source_node->details->online) { /* This is a partial migration: the migrate_to completed * successfully on the source, but the migrate_from has not * completed. Remember the source and target; if the newly * chosen target remains the same when we schedule actions * later, we may continue with the migration. */ history->rsc->partial_migration_target = target_node; history->rsc->partial_migration_source = source_node; } } else if (!source_newer_op) { // Mark resource as failed, require recovery, and prevent migration pcmk__set_rsc_flags(history->rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_migratable); } } /*! * \internal * \brief Update resource role etc. after a failed migrate_to action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_to_failure(struct action_history *history) { xmlNode *target_migrate_from = NULL; const char *source = NULL; const char *target = NULL; // Get source and target node names from XML if (get_migration_node_names(history->xml, history->node, NULL, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->role = pcmk_role_started; // Check for migrate_from on the target target_migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM, target, source, PCMK_OCF_OK, history->rsc->cluster); if (/* If the resource state is unknown on the target, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, target) /* If the resource has newer state on the target after the migration * events, this migrate_to no longer matters for the target. */ && !newer_state_after_migrate(history->rsc->id, target, history->xml, target_migrate_from, history->rsc->cluster)) { /* The resource has no newer state on the target, so assume it's still * active there. * (if it is up). */ pcmk_node_t *target_node = pe_find_node(history->rsc->cluster->nodes, target); if (target_node && target_node->details->online) { native_add_running(history->rsc, target_node, history->rsc->cluster, FALSE); } } else if (!non_monitor_after(history->rsc->id, source, history->xml, true, history->rsc->cluster)) { /* We know the resource has newer state on the target, but this * migrate_to still matters for the source as long as there's no newer * non-monitor operation there. */ // Mark node as having dangling migration so we can force a stop later history->rsc->dangling_migrations = g_list_prepend(history->rsc->dangling_migrations, (gpointer) history->node); } } /*! * \internal * \brief Update resource role etc. 
after a failed migrate_from action * * \param[in,out] history Parsed action result history */ static void unpack_migrate_from_failure(struct action_history *history) { xmlNode *source_migrate_to = NULL; const char *source = NULL; const char *target = NULL; // Get source and target node names from XML if (get_migration_node_names(history->xml, NULL, history->node, &source, &target) != pcmk_rc_ok) { return; } /* If a migration failed, we have to assume the resource is active. Clones * are not allowed to migrate, so role can't be promoted. */ history->rsc->role = pcmk_role_started; // Check for a migrate_to on the source source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO, source, target, PCMK_OCF_OK, history->rsc->cluster); if (/* If the resource state is unknown on the source, it will likely be * probed there. * Don't just consider it running there. We will get back here anyway in * case the probe detects it's running there. */ !unknown_on_node(history->rsc, source) /* If the resource has newer state on the source after the migration * events, this migrate_from no longer matters for the source. */ && !newer_state_after_migrate(history->rsc->id, source, source_migrate_to, history->xml, history->rsc->cluster)) { /* The resource has no newer state on the source, so assume it's still * active there (if it is up). */ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes, source); if (source_node && source_node->details->online) { native_add_running(history->rsc, source_node, history->rsc->cluster, TRUE); } } } /*! * \internal * \brief Add an action to cluster's list of failed actions * * \param[in,out] history Parsed action result history */ static void record_failed_op(struct action_history *history) { if (!(history->node->details->online)) { return; } for (const xmlNode *xIter = history->rsc->cluster->failed->children; xIter != NULL; xIter = xIter->next) { const char *key = pcmk__xe_history_key(xIter); const char *uname = crm_element_value(xIter, PCMK_XA_UNAME); if (pcmk__str_eq(history->key, key, pcmk__str_none) && pcmk__str_eq(uname, history->node->details->uname, pcmk__str_casei)) { crm_trace("Skipping duplicate entry %s on %s", history->key, pcmk__node_name(history->node)); return; } } crm_trace("Adding entry for %s on %s to failed action list", history->key, pcmk__node_name(history->node)); crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname); crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id); pcmk__xml_copy(history->rsc->cluster->failed, history->xml); } static char * last_change_str(const xmlNode *xml_op) { time_t when; char *result = NULL; if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE, &when) == pcmk_ok) { char *when_s = pcmk__epoch2str(&when, 0); const char *p = strchr(when_s, ' '); // Skip day of week to make message shorter if ((p != NULL) && (*(++p) != '\0')) { result = strdup(p); CRM_ASSERT(result != NULL); } free(when_s); } if (result == NULL) { result = strdup("unknown time"); CRM_ASSERT(result != NULL); } return result; } /*! * \internal * \brief Compare two on-fail values * * \param[in] first One on-fail value to compare * \param[in] second The other on-fail value to compare * * \return A negative number if second is more severe than first, zero if they * are equal, or a positive number if first is more severe than second. * \note This is only needed until the action_fail_response values can be * renumbered at the next API compatibility break. 
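 *
 * A few illustrative comparisons, following the ordering implemented below:
 *
 *     cmp_on_fail(pcmk_on_fail_demote, pcmk_on_fail_ignore) > 0
 *     cmp_on_fail(pcmk_on_fail_restart, pcmk_on_fail_reset_remote) < 0
 *     cmp_on_fail(pcmk_on_fail_block, pcmk_on_fail_block) == 0
 *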
*/ static int cmp_on_fail(enum action_fail_response first, enum action_fail_response second) { switch (first) { case pcmk_on_fail_demote: switch (second) { case pcmk_on_fail_ignore: return 1; case pcmk_on_fail_demote: return 0; default: return -1; } break; case pcmk_on_fail_reset_remote: switch (second) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: return 1; case pcmk_on_fail_reset_remote: return 0; default: return -1; } break; case pcmk_on_fail_restart_container: switch (second) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: case pcmk_on_fail_reset_remote: return 1; case pcmk_on_fail_restart_container: return 0; default: return -1; } break; default: break; } switch (second) { case pcmk_on_fail_demote: return (first == pcmk_on_fail_ignore)? -1 : 1; case pcmk_on_fail_reset_remote: switch (first) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: return -1; default: return 1; } break; case pcmk_on_fail_restart_container: switch (first) { case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: case pcmk_on_fail_reset_remote: return -1; default: return 1; } break; default: break; } return first - second; } /*! * \internal * \brief Ban a resource (or its clone if an anonymous instance) from all nodes * * \param[in,out] rsc Resource to ban */ static void ban_from_all_nodes(pcmk_resource_t *rsc) { int score = -PCMK_SCORE_INFINITY; pcmk_resource_t *fail_rsc = rsc; if (fail_rsc->parent != NULL) { pcmk_resource_t *parent = uber_parent(fail_rsc); if (pcmk__is_anonymous_clone(parent)) { /* For anonymous clones, if an operation with * PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the * entire clone must stop. */ fail_rsc = parent; } } // Ban the resource from all nodes crm_notice("%s will not be started under current conditions", fail_rsc->id); if (fail_rsc->allowed_nodes != NULL) { g_hash_table_destroy(fail_rsc->allowed_nodes); } fail_rsc->allowed_nodes = pe__node_list2table(rsc->cluster->nodes); g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score); } /*! * \internal * \brief Get configured failure handling and role after failure for an action * * \param[in,out] history Unpacked action history entry * \param[out] on_fail Where to set configured failure handling * \param[out] fail_role Where to set to role after failure */ static void unpack_failure_handling(struct action_history *history, enum action_fail_response *on_fail, enum rsc_role_e *fail_role) { xmlNode *config = pcmk__find_action_config(history->rsc, history->task, history->interval_ms, true); GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node, history->task, history->interval_ms, config); const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL); *on_fail = pcmk__parse_on_fail(history->rsc, history->task, history->interval_ms, on_fail_str); *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail, meta); g_hash_table_destroy(meta); } /*! 
* \internal * \brief Update resource role, failure handling, etc., after a failed action * * \param[in,out] history Parsed action result history * \param[in] config_on_fail Action failure handling from configuration * \param[in] fail_role Resource's role after failure of this action * \param[out] last_failure This will be set to the history XML * \param[in,out] on_fail Actual handling of action result */ static void unpack_rsc_op_failure(struct action_history *history, enum action_fail_response config_on_fail, enum rsc_role_e fail_role, xmlNode **last_failure, enum action_fail_response *on_fail) { bool is_probe = false; char *last_change_s = NULL; *last_failure = history->xml; is_probe = pcmk_xe_is_probe(history->xml); last_change_s = last_change_str(history->xml); if (!pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_symmetric_cluster) && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { crm_trace("Unexpected result (%s%s%s) was recorded for " "%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s", services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? "probe" : history->task), history->rsc->id, pcmk__node_name(history->node), last_change_s, history->exit_status, history->id); } else { pcmk__sched_warn("Unexpected result (%s%s%s) was recorded for %s of " "%s on %s at %s " CRM_XS " exit-status=%d id=%s", services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), (is_probe? "probe" : history->task), history->rsc->id, pcmk__node_name(history->node), last_change_s, history->exit_status, history->id); if (is_probe && (history->exit_status != PCMK_OCF_OK) && (history->exit_status != PCMK_OCF_NOT_RUNNING) && (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) { /* A failed (not just unexpected) probe result could mean the user * didn't know resources will be probed even where they can't run. */ crm_notice("If it is not possible for %s to run on %s, see " "the " PCMK_XA_RESOURCE_DISCOVERY " option for location " "constraints", history->rsc->id, pcmk__node_name(history->node)); } record_failed_op(history); } free(last_change_s); if (cmp_on_fail(*on_fail, config_on_fail) < 0) { pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s", pcmk_on_fail_text(*on_fail), pcmk_on_fail_text(config_on_fail), history->key); *on_fail = config_on_fail; } if (strcmp(history->task, PCMK_ACTION_STOP) == 0) { resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY, "__stop_fail__", history->rsc->cluster); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) { unpack_migrate_to_failure(history); } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) { unpack_migrate_from_failure(history); } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) { history->rsc->role = pcmk_role_promoted; } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) { if (config_on_fail == pcmk_on_fail_block) { history->rsc->role = pcmk_role_promoted; pe__set_next_role(history->rsc, pcmk_role_stopped, "demote with on-fail=block"); } else if (history->exit_status == PCMK_OCF_NOT_RUNNING) { history->rsc->role = pcmk_role_stopped; } else { /* Staying in the promoted role would put the scheduler and * controller into a loop. Setting the role to unpromoted is not * dangerous because the resource will be stopped as part of * recovery, and any promotion will be ordered after that stop. 
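 *
 * (Concretely: if the role were left as promoted, the scheduler would keep
 * scheduling the same demote, which would presumably keep failing, and the
 * transition would never converge.)
 *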
*/ history->rsc->role = pcmk_role_unpromoted; } } if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) { /* leave stopped */ pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id); history->rsc->role = pcmk_role_stopped; } else if (history->rsc->role < pcmk_role_started) { pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id); set_active(history->rsc); } pcmk__rsc_trace(history->rsc, "Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s", history->rsc->id, pcmk_role_text(history->rsc->role), pcmk__btoa(history->node->details->unclean), pcmk_on_fail_text(config_on_fail), pcmk_role_text(fail_role)); if ((fail_role != pcmk_role_started) && (history->rsc->next_role < fail_role)) { pe__set_next_role(history->rsc, fail_role, "failure"); } if (fail_role == pcmk_role_stopped) { ban_from_all_nodes(history->rsc); } } /*! * \internal * \brief Block a resource with a failed action if it cannot be recovered * * If resource action is a failed stop and fencing is not possible, mark the * resource as unmanaged and blocked, since recovery cannot be done. * * \param[in,out] history Parsed action history entry */ static void block_if_unrecoverable(struct action_history *history) { char *last_change_s = NULL; if (strcmp(history->task, PCMK_ACTION_STOP) != 0) { return; // All actions besides stop are always recoverable } if (pe_can_fence(history->node->details->data_set, history->node)) { return; // Failed stops are recoverable via fencing } last_change_s = last_change_str(history->xml); pcmk__sched_err("No further recovery can be attempted for %s " "because %s on %s failed (%s%s%s) at %s " CRM_XS " rc=%d id=%s", history->rsc->id, history->task, pcmk__node_name(history->node), services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, ""), last_change_s, history->exit_status, history->id); free(last_change_s); pcmk__clear_rsc_flags(history->rsc, pcmk_rsc_managed); pcmk__set_rsc_flags(history->rsc, pcmk_rsc_blocked); } /*! * \internal * \brief Update action history's execution status and why * * \param[in,out] history Parsed action history entry * \param[out] why Where to store reason for update * \param[in] value New value * \param[in] reason Description of why value was changed */ static inline void remap_because(struct action_history *history, const char **why, int value, const char *reason) { if (history->execution_status != value) { history->execution_status = value; *why = reason; } } /*! * \internal * \brief Remap informational monitor results and operation status * * For the monitor results, certain OCF codes are for providing extended information * to the user about services that aren't yet failed but not entirely healthy either. * These must be treated as the "normal" result by Pacemaker. * * For operation status, the action result can be used to determine an appropriate * status for the purposes of responding to the action. The status provided by the * executor is not directly usable since the executor does not know what was expected. * * \param[in,out] history Parsed action history entry * \param[in,out] on_fail What should be done about the result * \param[in] expired Whether result is expired * * \note If the result is remapped and the node is not shutting down or failed, * the operation will be recorded in the scheduler data's list of failed * operations to highlight it for the user. * * \note This may update the resource's current and next role. 
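 *
 * Illustrative remappings (the first two follow from the degraded-result
 * handling below, the third from the probe handling of an unexpectedly
 * "running" result):
 *
 *     PCMK_OCF_DEGRADED           -> PCMK_OCF_OK
 *     PCMK_OCF_DEGRADED_PROMOTED  -> PCMK_OCF_RUNNING_PROMOTED
 *     probe expected PCMK_OCF_NOT_RUNNING but got PCMK_OCF_OK
 *                                 -> execution status PCMK_EXEC_DONE
 *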
*/ static void remap_operation(struct action_history *history, enum action_fail_response *on_fail, bool expired) { bool is_probe = false; int orig_exit_status = history->exit_status; int orig_exec_status = history->execution_status; const char *why = NULL; const char *task = history->task; // Remap degraded results to their successful counterparts history->exit_status = pcmk__effective_rc(history->exit_status); if (history->exit_status != orig_exit_status) { why = "degraded result"; if (!expired && (!history->node->details->shutdown || history->node->details->online)) { record_failed_op(history); } } if (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml) && ((history->execution_status != PCMK_EXEC_DONE) || (history->exit_status != PCMK_OCF_NOT_RUNNING))) { history->execution_status = PCMK_EXEC_DONE; history->exit_status = PCMK_OCF_NOT_RUNNING; why = "equivalent probe result"; } /* If the executor reported an execution status of anything but done or * error, consider that final. But for done or error, we know better whether * it should be treated as a failure or not, because we know the expected * result. */ switch (history->execution_status) { case PCMK_EXEC_DONE: case PCMK_EXEC_ERROR: break; // These should be treated as node-fatal case PCMK_EXEC_NO_FENCE_DEVICE: case PCMK_EXEC_NO_SECRETS: remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "node-fatal error"); goto remap_done; default: goto remap_done; } is_probe = pcmk_xe_is_probe(history->xml); if (is_probe) { task = "probe"; } if (history->expected_exit_status < 0) { /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the * expected exit status in the transition key, which (along with the * similar case of a corrupted transition key in the CIB) will be * reported to this function as -1. Pacemaker 2.0+ does not support * rolling upgrades from those versions or processing of saved CIB files * from those versions, so we do not need to care much about this case. */ remap_because(history, &why, PCMK_EXEC_ERROR, "obsolete history format"); pcmk__config_warn("Expected result not found for %s on %s " "(corrupt or obsolete CIB?)", history->key, pcmk__node_name(history->node)); } else if (history->exit_status == history->expected_exit_status) { remap_because(history, &why, PCMK_EXEC_DONE, "expected result"); } else { remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result"); pcmk__rsc_debug(history->rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)", history->key, pcmk__node_name(history->node), history->expected_exit_status, services_ocf_exitcode_str(history->expected_exit_status), history->exit_status, services_ocf_exitcode_str(history->exit_status), (pcmk__str_empty(history->exit_reason)? "" : ": "), pcmk__s(history->exit_reason, "")); } switch (history->exit_status) { case PCMK_OCF_OK: if (is_probe && (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active on %s at %s", history->rsc->id, pcmk__node_name(history->node), last_change_s); free(last_change_s); } break; case PCMK_OCF_NOT_RUNNING: if (is_probe || (history->expected_exit_status == history->exit_status) || !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) { /* For probes, recurring monitors for the Stopped role, and * unmanaged resources, "not running" is not considered a * failure. 
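 *
 * For example, a probe that finds nothing running, or a role=Stopped
 * recurring monitor whose expected result is PCMK_OCF_NOT_RUNNING, succeeds
 * here; only a monitor of a managed resource that expected it to be running
 * keeps the "unexpected result" remapping applied above.
 *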
*/ remap_because(history, &why, PCMK_EXEC_DONE, "exit status"); history->rsc->role = pcmk_role_stopped; *on_fail = pcmk_on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "not running"); } break; case PCMK_OCF_RUNNING_PROMOTED: if (is_probe && (history->exit_status != history->expected_exit_status)) { char *last_change_s = last_change_str(history->xml); remap_because(history, &why, PCMK_EXEC_DONE, "probe"); pcmk__rsc_info(history->rsc, "Probe found %s active and promoted on %s at %s", history->rsc->id, pcmk__node_name(history->node), last_change_s); free(last_change_s); } if (!expired || (history->exit_status == history->expected_exit_status)) { history->rsc->role = pcmk_role_promoted; } break; case PCMK_OCF_FAILED_PROMOTED: if (!expired) { history->rsc->role = pcmk_role_promoted; } remap_because(history, &why, PCMK_EXEC_ERROR, "exit status"); break; case PCMK_OCF_NOT_CONFIGURED: remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status"); break; case PCMK_OCF_UNIMPLEMENT_FEATURE: { guint interval_ms = 0; crm_element_value_ms(history->xml, PCMK_META_INTERVAL, &interval_ms); if (interval_ms == 0) { if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); } else { remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED, "exit status"); } } break; case PCMK_OCF_NOT_INSTALLED: case PCMK_OCF_INVALID_PARAM: case PCMK_OCF_INSUFFICIENT_PRIV: if (!expired) { block_if_unrecoverable(history); } remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status"); break; default: if (history->execution_status == PCMK_EXEC_DONE) { char *last_change_s = last_change_str(history->xml); crm_info("Treating unknown exit status %d from %s of %s " "on %s at %s as failure", history->exit_status, task, history->rsc->id, pcmk__node_name(history->node), last_change_s); remap_because(history, &why, PCMK_EXEC_ERROR, "unknown exit status"); free(last_change_s); } break; } remap_done: if (why != NULL) { pcmk__rsc_trace(history->rsc, "Remapped %s result from [%s: %s] to [%s: %s] " "because of %s", history->key, pcmk_exec_status_str(orig_exec_status), crm_exit_str(orig_exit_status), pcmk_exec_status_str(history->execution_status), crm_exit_str(history->exit_status), why); } } // return TRUE if start or monitor last failure but parameters changed static bool should_clear_for_param_change(const xmlNode *xml_op, const char *task, pcmk_resource_t *rsc, pcmk_node_t *node) { if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) { if (pe__bundle_needs_remote_name(rsc)) { /* We haven't allocated resources yet, so we can't reliably * substitute addr parameters for the REMOTE_CONTAINER_HACK. * When that's needed, defer the check until later. 
*/ pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure, rsc->cluster); } else { pcmk__op_digest_t *digest_data = NULL; digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->cluster); switch (digest_data->rc) { case pcmk__digest_unknown: crm_trace("Resource %s history entry %s on %s" " has no digest to compare", rsc->id, pcmk__xe_history_key(xml_op), node->details->id); break; case pcmk__digest_match: break; default: return TRUE; } } } return FALSE; } // Order action after fencing of remote node, given connection rsc static void order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn, pcmk_scheduler_t *scheduler) { pcmk_node_t *remote_node = pe_find_node(scheduler->nodes, remote_conn->id); if (remote_node) { pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL, FALSE, scheduler); order_actions(fence, action, pcmk__ar_first_implies_then); } } static bool should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task, guint interval_ms, bool is_last_failure) { /* Clearing failures of recurring monitors has special concerns. The * executor reports only changes in the monitor result, so if the * monitor is still active and still getting the same failure result, * that will go undetected after the failure is cleared. * * Also, the operation history will have the time when the recurring * monitor result changed to the given code, not the time when the * result last happened. * * @TODO We probably should clear such failures only when the failure * timeout has passed since the last occurrence of the failed result. * However we don't record that information. We could maybe approximate * that by clearing only if there is a more recent successful monitor or * stop result, but we don't even have that information at this point * since we are still unpacking the resource's operation history. * * This is especially important for remote connection resources with a * reconnect interval, so in that case, we skip clearing failures * if the remote node hasn't been fenced. */ if (rsc->remote_reconnect_ms && pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled) && (interval_ms != 0) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) { pcmk_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id); if (remote_node && !remote_node->details->remote_was_fenced) { if (is_last_failure) { crm_info("Waiting to clear monitor failure for remote node %s" " until fencing has occurred", rsc->id); } return TRUE; } } return FALSE; } /*! * \internal * \brief Check operation age and schedule failure clearing when appropriate * * This function has two distinct purposes. The first is to check whether an * operation history entry is expired (i.e. the resource has a failure timeout, * the entry is older than the timeout, and the resource either has no fail * count or its fail count is entirely older than the timeout). The second is to * schedule fail count clearing when appropriate (i.e. the operation is expired * and either the resource has an expired fail count or the operation is a * last_failure for a remote connection resource with a reconnect interval, * or the operation is a last_failure for a start or monitor operation and the * resource's parameters have changed since the operation). 
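 *
 * Worked example (values illustrative): with PCMK_META_FAILURE_TIMEOUT set
 * to 600s, a failed monitor whose PCMK_XA_LAST_RC_CHANGE is 12:00:00 becomes
 * eligible to expire at 12:10:00, but only if the resource's fail count is
 * also entirely older than 600s; if a newer failure exists, the entry stays
 * unexpired and the cluster recheck time is pushed to just past that
 * failure's timestamp plus the timeout.
 *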
* * \param[in,out] history Parsed action result history * * \return true if operation history entry is expired, otherwise false */ static bool check_operation_expiry(struct action_history *history) { bool expired = false; bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0"); time_t last_run = 0; int unexpired_fail_count = 0; const char *clear_reason = NULL; if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) { pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not expired: " "Not Installed does not expire", history->id, pcmk__node_name(history->node)); return false; // "Not installed" must always be cleared manually } if ((history->rsc->failure_timeout > 0) && (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE, &last_run) == 0)) { /* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a * timestamp */ time_t now = get_effective_time(history->rsc->cluster); time_t last_failure = 0; // Is this particular operation history older than the failure timeout? if ((now >= (last_run + history->rsc->failure_timeout)) && !should_ignore_failure_timeout(history->rsc, history->task, history->interval_ms, is_last_failure)) { expired = true; } // Does the resource as a whole have an unexpired fail count? unexpired_fail_count = pe_get_failcount(history->node, history->rsc, &last_failure, pcmk__fc_effective, history->xml); // Update scheduler recheck time according to *last* failure crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds" " last-failure@%lld", history->id, (long long) last_run, (expired? "" : "not "), (long long) now, unexpired_fail_count, history->rsc->failure_timeout, (long long) last_failure); last_failure += history->rsc->failure_timeout + 1; if (unexpired_fail_count && (now < last_failure)) { pe__update_recheck_time(last_failure, history->rsc->cluster, "fail count expiration"); } } if (expired) { if (pe_get_failcount(history->node, history->rsc, NULL, pcmk__fc_default, history->xml)) { // There is a fail count ignoring timeout if (unexpired_fail_count == 0) { // There is no fail count considering timeout clear_reason = "it expired"; } else { /* This operation is old, but there is an unexpired fail count. * In a properly functioning cluster, this should only be * possible if this operation is not a failure (otherwise the * fail count should be expired too), so this is really just a * failsafe. */ pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not " "expired: Unexpired fail count", history->id, pcmk__node_name(history->node)); expired = false; } } else if (is_last_failure && (history->rsc->remote_reconnect_ms != 0)) { /* Clear any expired last failure when reconnect interval is set, * even if there is no fail count. */ clear_reason = "reconnect interval is set"; } } if (!expired && is_last_failure && should_clear_for_param_change(history->xml, history->task, history->rsc, history->node)) { clear_reason = "resource parameters have changed"; } if (clear_reason != NULL) { pcmk_action_t *clear_op = NULL; // Schedule clearing of the fail count clear_op = pe__clear_failcount(history->rsc, history->node, clear_reason, history->rsc->cluster); if (pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_fencing_enabled) && (history->rsc->remote_reconnect_ms != 0)) { /* If we're clearing a remote connection due to a reconnect * interval, we want to wait until any scheduled fencing * completes. 
* * We could limit this to remote_node->details->unclean, but at * this point, that's always true (it won't be reliable until * after unpack_node_history() is done). */ crm_info("Clearing %s failure will wait until any scheduled " "fencing of %s completes", history->task, history->rsc->id); order_after_remote_fencing(clear_op, history->rsc, history->rsc->cluster); } } if (expired && (history->interval_ms == 0) && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) { switch (history->exit_status) { case PCMK_OCF_OK: case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_PROMOTED: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_PROMOTED: // Don't expire probes that return these values pcmk__rsc_trace(history->rsc, "Resource history entry %s on %s is not " "expired: Probe result", history->id, pcmk__node_name(history->node)); expired = false; break; } } return expired; } int pe__target_rc_from_xml(const xmlNode *xml_op) { int target_rc = 0; const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY); if (key == NULL) { return -1; } decode_transition_key(key, NULL, NULL, NULL, &target_rc); return target_rc; } /*! * \internal * \brief Update a resource's state for an action result * * \param[in,out] history Parsed action history entry * \param[in] exit_status Exit status to base new state on * \param[in] last_failure Resource's last_failure entry, if known * \param[in,out] on_fail Resource's current failure handling */ static void update_resource_state(struct action_history *history, int exit_status, const xmlNode *last_failure, enum action_fail_response *on_fail) { bool clear_past_failure = false; if ((exit_status == PCMK_OCF_NOT_INSTALLED) || (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml))) { history->rsc->role = pcmk_role_stopped; } else if (exit_status == PCMK_OCF_NOT_RUNNING) { clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) { if ((last_failure != NULL) && pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure), pcmk__str_none)) { clear_past_failure = true; } if (history->rsc->role < pcmk_role_started) { set_active(history->rsc); } } else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) { history->rsc->role = pcmk_role_started; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) { history->rsc->role = pcmk_role_stopped; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) { history->rsc->role = pcmk_role_promoted; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE, pcmk__str_none)) { if (*on_fail == pcmk_on_fail_demote) { /* Demote clears an error only if * PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE */ clear_past_failure = true; } history->rsc->role = pcmk_role_unpromoted; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM, pcmk__str_none)) { history->rsc->role = pcmk_role_started; clear_past_failure = true; } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO, pcmk__str_none)) { unpack_migrate_to_success(history); } else if (history->rsc->role < pcmk_role_started) { pcmk__rsc_trace(history->rsc, "%s active on %s", history->rsc->id, pcmk__node_name(history->node)); set_active(history->rsc); } if (!clear_past_failure) { return; } switch (*on_fail) { case pcmk_on_fail_stop: case pcmk_on_fail_ban: case pcmk_on_fail_standby_node: case pcmk_on_fail_fence_node: pcmk__rsc_trace(history->rsc, "%s (%s) is not cleared by a 
completed %s", history->rsc->id, pcmk_on_fail_text(*on_fail), history->task); break; case pcmk_on_fail_block: case pcmk_on_fail_ignore: case pcmk_on_fail_demote: case pcmk_on_fail_restart: case pcmk_on_fail_restart_container: *on_fail = pcmk_on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "clear past failures"); break; case pcmk_on_fail_reset_remote: if (history->rsc->remote_reconnect_ms == 0) { /* With no reconnect interval, the connection is allowed to * start again after the remote node is fenced and * completely stopped. (With a reconnect interval, we wait * for the failure to be cleared entirely before attempting * to reconnect.) */ *on_fail = pcmk_on_fail_ignore; pe__set_next_role(history->rsc, pcmk_role_unknown, "clear past failures and reset remote"); } break; } } /*! * \internal * \brief Check whether a given history entry matters for resource state * * \param[in] history Parsed action history entry * * \return true if action can affect resource state, otherwise false */ static inline bool can_affect_state(struct action_history *history) { #if 0 /* @COMPAT It might be better to parse only actions we know we're interested * in, rather than exclude a couple we don't. However that would be a * behavioral change that should be done at a major or minor series release. * Currently, unknown operations can affect whether a resource is considered * active and/or failed. */ return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR, PCMK_ACTION_START, PCMK_ACTION_STOP, PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM, "asyncmon", NULL); #else return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY, PCMK_ACTION_META_DATA, NULL); #endif } /*! * \internal * \brief Unpack execution/exit status and exit reason from a history entry * * \param[in,out] history Action history entry to unpack * * \return Standard Pacemaker return code */ static int unpack_action_result(struct action_history *history) { if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS, &(history->execution_status)) < 0) || (history->execution_status < PCMK_EXEC_PENDING) || (history->execution_status > PCMK_EXEC_MAX) || (history->execution_status == PCMK_EXEC_CANCELLED)) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "with invalid " PCMK__XA_OP_STATUS " '%s'", history->id, history->rsc->id, pcmk__node_name(history->node), pcmk__s(crm_element_value(history->xml, PCMK__XA_OP_STATUS), "")); return pcmk_rc_unpack_error; } if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE, &(history->exit_status)) < 0) || (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) { #if 0 /* @COMPAT We should ignore malformed entries, but since that would * change behavior, it should be done at a major or minor series * release. */ pcmk__config_err("Ignoring resource history entry %s for %s on %s " "with invalid " PCMK__XA_RC_CODE " '%s'", history->id, history->rsc->id, pcmk__node_name(history->node), pcmk__s(crm_element_value(history->xml, PCMK__XA_RC_CODE), "")); return pcmk_rc_unpack_error; #else history->exit_status = CRM_EX_ERROR; #endif } history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON); return pcmk_rc_ok; } /*! 
* \internal * \brief Process an action history entry whose result expired * * \param[in,out] history Parsed action history entry * \param[in] orig_exit_status Action exit status before remapping * * \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the * entry needs no further processing) */ static int process_expired_result(struct action_history *history, int orig_exit_status) { if (!pcmk__is_bundled(history->rsc) && pcmk_xe_mask_probe_failure(history->xml) && (orig_exit_status != history->expected_exit_status)) { if (history->rsc->role <= pcmk_role_stopped) { history->rsc->role = pcmk_role_unknown; } crm_trace("Ignoring resource history entry %s for probe of %s on %s: " "Masked failure expired", history->id, history->rsc->id, pcmk__node_name(history->node)); return pcmk_rc_ok; } if (history->exit_status == history->expected_exit_status) { return pcmk_rc_undetermined; // Only failures expire } if (history->interval_ms == 0) { crm_notice("Ignoring resource history entry %s for %s of %s on %s: " "Expired failure", history->id, history->task, history->rsc->id, pcmk__node_name(history->node)); return pcmk_rc_ok; } if (history->node->details->online && !history->node->details->unclean) { /* Reschedule the recurring action. schedule_cancel() won't work at * this stage, so as a hacky workaround, forcibly change the restart * digest so pcmk__check_action_config() does what we want later. * * @TODO We should skip this if there is a newer successful monitor. * Also, this causes rescheduling only if the history entry * has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure * scheduler regression test doesn't, but that may not be a * realistic scenario in production). */ crm_notice("Rescheduling %s-interval %s of %s on %s " "after failure expired", pcmk__readable_interval(history->interval_ms), history->task, history->rsc->id, pcmk__node_name(history->node)); crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST, "calculated-failure-timeout"); return pcmk_rc_ok; } return pcmk_rc_undetermined; } /*! * \internal * \brief Process a masked probe failure * * \param[in,out] history Parsed action history entry * \param[in] orig_exit_status Action exit status before remapping * \param[in] last_failure Resource's last_failure entry, if known * \param[in,out] on_fail Resource's current failure handling */ static void mask_probe_failure(struct action_history *history, int orig_exit_status, const xmlNode *last_failure, enum action_fail_response *on_fail) { pcmk_resource_t *ban_rsc = history->rsc; if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) { ban_rsc = uber_parent(history->rsc); } crm_notice("Treating probe result '%s' for %s on %s as 'not running'", services_ocf_exitcode_str(orig_exit_status), history->rsc->id, pcmk__node_name(history->node)); update_resource_state(history, history->expected_exit_status, last_failure, on_fail); crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->details->uname); record_failed_op(history); resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY, "masked-probe-failure", history->rsc->cluster); } /*! 
* \internal Check whether a given failure is for a given pending action * * \param[in] history Parsed history entry for pending action * \param[in] last_failure Resource's last_failure entry, if known * * \return true if \p last_failure is failure of pending action in \p history, * otherwise false * \note Both \p history and \p last_failure must come from the same * \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be * the same. */ static bool failure_is_newer(const struct action_history *history, const xmlNode *last_failure) { guint failure_interval_ms = 0U; long long failure_change = 0LL; long long this_change = 0LL; if (last_failure == NULL) { return false; // Resource has no last_failure entry } if (!pcmk__str_eq(history->task, crm_element_value(last_failure, PCMK_XA_OPERATION), pcmk__str_none)) { return false; // last_failure is for different action } if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL, &failure_interval_ms) != pcmk_ok) || (history->interval_ms != failure_interval_ms)) { return false; // last_failure is for action with different interval } if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE), &this_change, 0LL) != pcmk_rc_ok) || (pcmk__scan_ll(crm_element_value(last_failure, PCMK_XA_LAST_RC_CHANGE), &failure_change, 0LL) != pcmk_rc_ok) || (failure_change < this_change)) { return false; // Failure is not known to be newer } return true; } /*! * \internal * \brief Update a resource's role etc. for a pending action * * \param[in,out] history Parsed history entry for pending action * \param[in] last_failure Resource's last_failure entry, if known */ static void process_pending_action(struct action_history *history, const xmlNode *last_failure) { /* For recurring monitors, a failure is recorded only in RSC_last_failure_0, * and there might be a RSC_monitor_INTERVAL entry with the last successful * or pending result. * * If last_failure contains the failure of the pending recurring monitor * we're processing here, and is newer, the action is no longer pending. * (Pending results have call ID -1, which sorts last, so the last failure * if any should be known.) */ if (failure_is_newer(history, last_failure)) { return; } if (strcmp(history->task, PCMK_ACTION_START) == 0) { pcmk__set_rsc_flags(history->rsc, pcmk_rsc_start_pending); set_active(history->rsc); } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) { history->rsc->role = pcmk_role_promoted; } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) && history->node->details->unclean) { /* A migrate_to action is pending on a unclean source, so force a stop * on the target. */ const char *migrate_target = NULL; pcmk_node_t *target = NULL; migrate_target = crm_element_value(history->xml, PCMK__META_MIGRATE_TARGET); target = pe_find_node(history->rsc->cluster->nodes, migrate_target); if (target != NULL) { stop_action(history->rsc, target, FALSE); } } if (history->rsc->pending_task != NULL) { /* There should never be multiple pending actions, but as a failsafe, * just remember the first one processed for display purposes. */ return; } if (pcmk_is_probe(history->task, history->interval_ms)) { /* Pending probes are currently never displayed, even if pending * operations are requested. If we ever want to change that, * enable the below and the corresponding part of * native.c:native_pending_task(). 
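*/

/* Aside: a minimal, self-contained sketch of the probe test used above.  The
 * real check is pcmk_is_probe(); this stand-in only illustrates the
 * convention that a probe is a one-shot (interval 0) "monitor" action.
 */
#include <stdbool.h>
#include <string.h>

static bool
example_is_probe(const char *task, unsigned int interval_ms)
{
    // One-shot monitors are probes; recurring monitors are not
    return (interval_ms == 0) && (strcmp(task, "monitor") == 0);
}

/* The disabled block below would record pending probes for display: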
*/ #if 0 history->rsc->pending_task = strdup("probe"); history->rsc->pending_node = history->node; #endif } else { history->rsc->pending_task = strdup(history->task); history->rsc->pending_node = history->node; } } static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op, xmlNode **last_failure, enum action_fail_response *on_fail) { int old_rc = 0; bool expired = false; pcmk_resource_t *parent = rsc; enum rsc_role_e fail_role = pcmk_role_unknown; enum action_fail_response failure_strategy = pcmk_on_fail_restart; struct action_history history = { .rsc = rsc, .node = node, .xml = xml_op, .execution_status = PCMK_EXEC_UNKNOWN, }; CRM_CHECK(rsc && node && xml_op, return); history.id = pcmk__xe_id(xml_op); if (history.id == NULL) { pcmk__config_err("Ignoring resource history entry for %s on %s " "without ID", rsc->id, pcmk__node_name(node)); return; } // Task and interval history.task = crm_element_value(xml_op, PCMK_XA_OPERATION); if (history.task == NULL) { pcmk__config_err("Ignoring resource history entry %s for %s on %s " "without " PCMK_XA_OPERATION, history.id, rsc->id, pcmk__node_name(node)); return; } crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms)); if (!can_affect_state(&history)) { pcmk__rsc_trace(rsc, "Ignoring resource history entry %s for %s on %s " "with irrelevant action '%s'", history.id, rsc->id, pcmk__node_name(node), history.task); return; } if (unpack_action_result(&history) != pcmk_rc_ok) { return; // Error already logged } history.expected_exit_status = pe__target_rc_from_xml(xml_op); history.key = pcmk__xe_history_key(xml_op); crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id)); pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)", history.id, history.task, history.call_id, pcmk__node_name(node), pcmk_exec_status_str(history.execution_status), crm_exit_str(history.exit_status)); if (node->details->unclean) { pcmk__rsc_trace(rsc, "%s is running on %s, which is unclean (further action " "depends on value of stop's on-fail attribute)", rsc->id, pcmk__node_name(node)); } expired = check_operation_expiry(&history); old_rc = history.exit_status; remap_operation(&history, on_fail, expired); if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) { goto done; } if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) { mask_probe_failure(&history, old_rc, *last_failure, on_fail); goto done; } if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) { parent = uber_parent(rsc); } switch (history.execution_status) { case PCMK_EXEC_PENDING: process_pending_action(&history, *last_failure); goto done; case PCMK_EXEC_DONE: update_resource_state(&history, history.exit_status, *last_failure, on_fail); goto done; case PCMK_EXEC_NOT_INSTALLED: unpack_failure_handling(&history, &failure_strategy, &fail_role); if (failure_strategy == pcmk_on_fail_ignore) { crm_warn("Cannot ignore failed %s of %s on %s: " "Resource agent doesn't exist " CRM_XS " status=%d rc=%d id=%s", history.task, rsc->id, pcmk__node_name(node), history.execution_status, history.exit_status, history.id); /* Also for printing it as "FAILED" by marking it as * pcmk_rsc_failed later */ *on_fail = pcmk_on_fail_ban; } resource_location(parent, node, -PCMK_SCORE_INFINITY, "hard-error", rsc->cluster); unpack_rsc_op_failure(&history, failure_strategy, fail_role, last_failure, on_fail); goto done; case PCMK_EXEC_NOT_CONNECTED: if (pcmk__is_pacemaker_remote_node(node) && pcmk_is_set(node->details->remote_rsc->flags, pcmk_rsc_managed)) { /* 
We should never get into a situation where a managed remote * connection resource is considered OK but a resource action * behind the connection gets a "not connected" status. But as a * fail-safe in case a bug or unusual circumstances do lead to * that, ensure the remote connection is considered failed. */ pcmk__set_rsc_flags(node->details->remote_rsc, pcmk_rsc_failed|pcmk_rsc_stop_if_failed); } break; // Not done, do error handling case PCMK_EXEC_ERROR: case PCMK_EXEC_ERROR_HARD: case PCMK_EXEC_ERROR_FATAL: case PCMK_EXEC_TIMEOUT: case PCMK_EXEC_NOT_SUPPORTED: case PCMK_EXEC_INVALID: break; // Not done, do error handling default: // No other value should be possible at this point break; } unpack_failure_handling(&history, &failure_strategy, &fail_role); if ((failure_strategy == pcmk_on_fail_ignore) || ((failure_strategy == pcmk_on_fail_restart_container) && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) { char *last_change_s = last_change_str(xml_op); crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded " CRM_XS " %s", history.task, services_ocf_exitcode_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), rsc->id, pcmk__node_name(node), last_change_s, history.id); free(last_change_s); update_resource_state(&history, history.expected_exit_status, *last_failure, on_fail); crm_xml_add(xml_op, PCMK_XA_UNAME, node->details->uname); pcmk__set_rsc_flags(rsc, pcmk_rsc_ignore_failure); record_failed_op(&history); if ((failure_strategy == pcmk_on_fail_restart_container) && cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) { *on_fail = failure_strategy; } } else { unpack_rsc_op_failure(&history, failure_strategy, fail_role, last_failure, on_fail); if (history.execution_status == PCMK_EXEC_ERROR_HARD) { uint8_t log_level = LOG_ERR; if (history.exit_status == PCMK_OCF_NOT_INSTALLED) { log_level = LOG_NOTICE; } do_crm_log(log_level, "Preventing %s from restarting on %s because " "of hard failure (%s%s%s) " CRM_XS " %s", parent->id, pcmk__node_name(node), services_ocf_exitcode_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? "" : ": "), pcmk__s(history.exit_reason, ""), history.id); resource_location(parent, node, -PCMK_SCORE_INFINITY, "hard-error", rsc->cluster); } else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) { pcmk__sched_err("Preventing %s from restarting anywhere because " "of fatal failure (%s%s%s) " CRM_XS " %s", parent->id, services_ocf_exitcode_str(history.exit_status), (pcmk__str_empty(history.exit_reason)? 
"" : ": "), pcmk__s(history.exit_reason, ""), history.id); resource_location(parent, NULL, -PCMK_SCORE_INFINITY, "fatal-error", rsc->cluster); } } done: pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)", rsc->id, pcmk__node_name(node), history.id, pcmk_role_text(rsc->role), pcmk_role_text(rsc->next_role)); } static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite, pcmk_scheduler_t *scheduler) { const char *cluster_name = NULL; pe_rule_eval_data_t rule_data = { .node_hash = NULL, .now = scheduler->now, .match_data = NULL, .rsc_data = NULL, .op_data = NULL }; pcmk__insert_dup(node->details->attrs, CRM_ATTR_UNAME, node->details->uname); pcmk__insert_dup(node->details->attrs, CRM_ATTR_ID, node->details->id); if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) { scheduler->dc_node = node; node->details->is_dc = TRUE; pcmk__insert_dup(node->details->attrs, CRM_ATTR_IS_DC, PCMK_VALUE_TRUE); } else { pcmk__insert_dup(node->details->attrs, CRM_ATTR_IS_DC, PCMK_VALUE_FALSE); } cluster_name = g_hash_table_lookup(scheduler->config_hash, PCMK_OPT_CLUSTER_NAME); if (cluster_name) { pcmk__insert_dup(node->details->attrs, CRM_ATTR_CLUSTER_NAME, cluster_name); } pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data, node->details->attrs, NULL, overwrite, scheduler); pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_data, node->details->utilization, NULL, FALSE, scheduler); if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL, pcmk__rsc_node_current) == NULL) { const char *site_name = pcmk__node_attr(node, "site-name", NULL, pcmk__rsc_node_current); if (site_name) { pcmk__insert_dup(node->details->attrs, CRM_ATTR_SITE_NAME, site_name); } else if (cluster_name) { /* Default to cluster-name if unset */ pcmk__insert_dup(node->details->attrs, CRM_ATTR_SITE_NAME, cluster_name); } } } static GList * extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter) { int counter = -1; int stop_index = -1; int start_index = -1; xmlNode *rsc_op = NULL; GList *gIter = NULL; GList *op_list = NULL; GList *sorted_op_list = NULL; /* extract operations */ op_list = NULL; sorted_op_list = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) { if (pcmk__xe_is(rsc_op, PCMK__XE_LRM_RSC_OP)) { crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc); crm_xml_add(rsc_op, PCMK_XA_UNAME, node); op_list = g_list_prepend(op_list, rsc_op); } } if (op_list == NULL) { /* if there are no operations, there is nothing to do */ return NULL; } sorted_op_list = g_list_sort(op_list, sort_op_by_callid); /* create active recurring operations as optional */ if (active_filter == FALSE) { return sorted_op_list; } op_list = NULL; calculate_active_ops(sorted_op_list, &start_index, &stop_index); for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) { xmlNode *rsc_op = (xmlNode *) gIter->data; counter++; if (start_index < stop_index) { crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry)); break; } else if (counter < start_index) { crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op)); continue; } op_list = g_list_append(op_list, rsc_op); } g_list_free(sorted_op_list); return op_list; } GList * find_operations(const char *rsc, const char *node, gboolean active_filter, pcmk_scheduler_t *scheduler) { GList *output = NULL; GList *intermediate = NULL; xmlNode *tmp = NULL; xmlNode *status = find_xml_node(scheduler->input, PCMK_XE_STATUS, TRUE); pcmk_node_t *this_node = NULL; 
xmlNode *node_state = NULL; for (node_state = pcmk__xe_first_child(status); node_state != NULL; node_state = pcmk__xe_next(node_state)) { if (pcmk__xe_is(node_state, PCMK__XE_NODE_STATE)) { const char *uname = crm_element_value(node_state, PCMK_XA_UNAME); if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) { continue; } this_node = pe_find_node(scheduler->nodes, uname); if(this_node == NULL) { CRM_LOG_ASSERT(this_node != NULL); continue; } else if (pcmk__is_pacemaker_remote_node(this_node)) { determine_remote_online_status(scheduler, this_node); } else { determine_online_status(node_state, this_node, scheduler); } if (this_node->details->online || pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) { /* offline nodes run no resources... * unless stonith is enabled in which case we need to * make sure rsc start events happen after the stonith */ xmlNode *lrm_rsc = NULL; tmp = find_xml_node(node_state, PCMK__XE_LRM, FALSE); tmp = find_xml_node(tmp, PCMK__XE_LRM_RESOURCES, FALSE); for (lrm_rsc = pcmk__xe_first_child(tmp); lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc)) { if (pcmk__xe_is(lrm_rsc, PCMK__XE_LRM_RESOURCE)) { const char *rsc_id = crm_element_value(lrm_rsc, PCMK_XA_ID); if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) { continue; } intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter); output = g_list_concat(output, intermediate); } } } } } return output; }

diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 0c990ba862..3dc0e8eb91 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2247 +1,2248 @@
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
*/ #include #include #include #include #include #include static GList * build_node_info_list(const pcmk_resource_t *rsc) { GList *retval = NULL; for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) { const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data; for (const GList *iter2 = child->running_on; iter2 != NULL; iter2 = iter2->next) { const pcmk_node_t *node = (const pcmk_node_t *) iter2->data; node_info_t *ni = calloc(1, sizeof(node_info_t)); ni->node_name = node->details->uname; ni->promoted = pcmk_is_set(rsc->flags, pcmk_rsc_promotable) && child->fns->state(child, TRUE) == pcmk_role_promoted; retval = g_list_prepend(retval, ni); } } return retval; } GList * cli_resource_search(pcmk_resource_t *rsc, const char *requested_name, pcmk_scheduler_t *scheduler) { GList *retval = NULL; const pcmk_resource_t *parent = pe__const_top_resource(rsc, false); if (pcmk__is_clone(rsc)) { retval = build_node_info_list(rsc); /* The anonymous clone children's common ID is supplied */ } else if (pcmk__is_clone(parent) && !pcmk_is_set(rsc->flags, pcmk_rsc_unique) && rsc->clone_name && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei) && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) { retval = build_node_info_list(parent); } else if (rsc->running_on != NULL) { for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) { pcmk_node_t *node = (pcmk_node_t *) iter->data; node_info_t *ni = calloc(1, sizeof(node_info_t)); ni->node_name = node->details->uname; ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted); retval = g_list_prepend(retval, ni); } } return retval; } // \return Standard Pacemaker return code static int find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, const char *rsc, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, char **value) { int rc = pcmk_rc_ok; xmlNode *xml_search = NULL; GString *xpath = NULL; const char *xpath_base = NULL; if(value) { *value = NULL; } if(the_cib == NULL) { return ENOTCONN; } xpath_base = pcmk_cib_xpath_for(PCMK_XE_RESOURCES); if (xpath_base == NULL) { crm_err(PCMK_XE_RESOURCES " CIB element not known (bug?)"); return ENOMSG; } xpath = g_string_sized_new(1024); pcmk__g_strcat(xpath, xpath_base, "//*[@" PCMK_XA_ID "=\"", rsc, "\"]", NULL); if (attr_set_type != NULL) { pcmk__g_strcat(xpath, "/", attr_set_type, NULL); if (set_name != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "=\"", set_name, "\"]", NULL); } } g_string_append(xpath, "//" PCMK_XE_NVPAIR); if (attr_id != NULL && attr_name!= NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "' " "and @" PCMK_XA_NAME "='", attr_name, "']", NULL); } else if (attr_id != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL); } else if (attr_name != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL); } rc = the_cib->cmds->query(the_cib, (const char *) xpath->str, &xml_search, cib_sync_call | cib_scope_local | cib_xpath); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { goto done; } crm_log_xml_debug(xml_search, "Match"); if (xml_search->children != NULL) { rc = ENOTUNIQ; pcmk__warn_multiple_name_matches(out, xml_search, attr_name); out->spacer(out); } else if(value) { pcmk__str_update(value, crm_element_value(xml_search, attr)); } done: g_string_free(xpath, TRUE); free_xml(xml_search); return rc; } /* PRIVATE. Use the find_matching_attr_resources instead. 
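*/

/* Aside: a minimal, self-contained sketch (GLib only) of how an nvpair-lookup
 * XPath like the one assembled in find_resource_attr() above is built up.
 * The element and attribute names here are illustrative, not the exact CIB
 * schema constants.
 */
#include <glib.h>

static gchar *
example_attr_xpath(const char *rsc_id, const char *set_type,
                   const char *set_id, const char *attr_name)
{
    GString *xpath = g_string_sized_new(256);

    g_string_append_printf(xpath, "//resources//*[@id=\"%s\"]", rsc_id);
    if (set_type != NULL) {
        g_string_append_printf(xpath, "/%s", set_type);
        if (set_id != NULL) {
            g_string_append_printf(xpath, "[@id=\"%s\"]", set_id);
        }
    }
    g_string_append(xpath, "//nvpair");
    if (attr_name != NULL) {
        g_string_append_printf(xpath, "[@name='%s']", attr_name);
    }
    return g_string_free(xpath, FALSE);     // caller frees with g_free()
}

/* For example, ("myrsc", "meta_attributes", NULL, "target-role") yields
 * //resources//*[@id="myrsc"]/meta_attributes//nvpair[@name='target-role'].
 */

/* Post-order helper used by find_matching_attr_resources() below: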
*/ static void find_matching_attr_resources_recursive(pcmk__output_t *out, GList /* */ **result, pcmk_resource_t *rsc, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, int depth) { int rc = pcmk_rc_ok; char *lookup_id = clone_strip(rsc->id); /* visit the children */ for(GList *gIter = rsc->children; gIter; gIter = gIter->next) { find_matching_attr_resources_recursive(out, result, (pcmk_resource_t *) gIter->data, attr_set, attr_set_type, attr_id, attr_name, cib, depth+1); /* do it only once for clones */ if (rsc->variant == pcmk_rsc_variant_clone) { break; } } rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, NULL); /* Post-order traversal. * The root is always on the list and it is the last item. */ if((0 == depth) || (pcmk_rc_ok == rc)) { /* push the head */ *result = g_list_append(*result, rsc); } free(lookup_id); } /* The result is a linearized pre-ordered tree of resources. */ static GList/**/ * find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, gboolean force) { int rc = pcmk_rc_ok; char *lookup_id = NULL; GList * result = NULL; /* If --force is used, update only the requested resource (clone or primitive). * Otherwise, if the primitive has the attribute, use that. * Otherwise use the clone. */ if(force == TRUE) { return g_list_append(result, rsc); } if ((rsc->parent != NULL) && (rsc->parent->variant == pcmk_rsc_variant_clone)) { int rc = find_resource_attr(out, cib, PCMK_XA_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, NULL); if(rc != pcmk_rc_ok) { rsc = rsc->parent; out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'", cmd, attr_name, rsc->id, rsc_id); } return g_list_append(result, rsc); } else if ((rsc->parent == NULL) && (rsc->children != NULL) && (rsc->variant == pcmk_rsc_variant_clone)) { pcmk_resource_t *child = rsc->children->data; if (child->variant == pcmk_rsc_variant_primitive) { lookup_id = clone_strip(child->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, NULL); if(rc == pcmk_rc_ok) { rsc = child; out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'", attr_name, lookup_id, cmd, rsc_id); } free(lookup_id); } return g_list_append(result, rsc); } /* If the resource is a group ==> children inherit the attribute if defined. 
*/ find_matching_attr_resources_recursive(out, &result, rsc, attr_set, attr_set_type, attr_id, attr_name, cib, 0); return result; } // \return Standard Pacemaker return code int cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, int cib_options, gboolean force) { pcmk__output_t *out = rsc->cluster->priv; int rc = pcmk_rc_ok; char *found_attr_id = NULL; GList/**/ *resources = NULL; const char *top_id = pe__const_top_resource(rsc, false)->id; if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, PCMK_XA_ID, top_id, NULL, NULL, NULL, attr_name, NULL); } if (pcmk__str_eq(attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES, pcmk__str_casei)) { if (!force) { rc = find_resource_attr(out, cib, PCMK_XA_ID, top_id, PCMK_XE_META_ATTRIBUTES, attr_set, attr_id, attr_name, &found_attr_id); if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) { out->err(out, "WARNING: There is already a meta attribute " "for '%s' called '%s' (id=%s)", top_id, attr_name, found_attr_id); out->err(out, " Delete '%s' first or use the force option " "to override", found_attr_id); } free(found_attr_id); if (rc == pcmk_rc_ok) { return ENOTUNIQ; } } resources = g_list_append(resources, rsc); } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { crm_xml_add(rsc->xml, attr_name, attr_value); CRM_ASSERT(cib != NULL); rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc->xml, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Set attribute: name=%s value=%s", attr_name, attr_value); } return rc; } else { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "update", force); } /* If the user specified attr_set or attr_id, the intent is to modify a * single resource, which will be the last item in the list. */ if ((attr_set != NULL) || (attr_id != NULL)) { GList *last = g_list_last(resources); resources = g_list_remove_link(resources, last); g_list_free(resources); resources = last; } for (GList *iter = resources; iter != NULL; iter = iter->next) { char *lookup_id = NULL; char *local_attr_set = NULL; const char *rsc_attr_id = attr_id; const char *rsc_attr_set = attr_set; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; found_attr_id = NULL; rsc = (pcmk_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); /* Could be a cloned group! 
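*/

/* Aside: clone instance IDs conventionally look like "name:0", "name:1", and
 * so on.  A minimal stand-in for what clone_strip() does with such an ID
 * (drop a trailing ":<digits>" suffix) could look like this; the real helper
 * may handle more cases.
 */
#include <ctype.h>
#include <string.h>

static char *
example_clone_strip(const char *instance_id)
{
    const char *colon = strrchr(instance_id, ':');
    size_t len = strlen(instance_id);

    if ((colon != NULL) && (colon[1] != '\0')) {
        const char *p = colon + 1;

        while (isdigit((unsigned char) *p)) {
            p++;
        }
        if (*p == '\0') {       // everything after ':' was a number
            len = (size_t) (colon - instance_id);
        }
    }
    return strndup(instance_id, len);
}

/* Check whether the attribute already exists for this resource: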
*/ rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &found_attr_id); switch (rc) { case pcmk_rc_ok: - crm_debug("Found a match for name=%s: id=%s", + crm_debug("Found a match for name=%s: " PCMK_XA_ID "=%s", attr_name, found_attr_id); rsc_attr_id = found_attr_id; break; case ENXIO: if (rsc_attr_set == NULL) { local_attr_set = crm_strdup_printf("%s-%s", lookup_id, attr_set_type); rsc_attr_set = local_attr_set; } if (rsc_attr_id == NULL) { found_attr_id = crm_strdup_printf("%s-%s", rsc_attr_set, attr_name); rsc_attr_id = found_attr_id; } xml_top = create_xml_node(NULL, (const char *) rsc->xml->name); crm_xml_add(xml_top, PCMK_XA_ID, lookup_id); xml_obj = create_xml_node(xml_top, attr_set_type); crm_xml_add(xml_obj, PCMK_XA_ID, rsc_attr_set); break; default: free(lookup_id); free(found_attr_id); g_list_free(resources); return rc; } xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, PCMK_XE_RESOURCES, xml_top, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { - out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s", + out->info(out, "Set '%s' option: " + PCMK_XA_ID "=%s%s%s%s%s value=%s", lookup_id, found_attr_id, ((rsc_attr_set == NULL)? "" : " set="), pcmk__s(rsc_attr_set, ""), ((attr_name == NULL)? "" : " name="), pcmk__s(attr_name, ""), attr_value); } free_xml(xml_top); free(lookup_id); free(found_attr_id); free(local_attr_set); if (recursive && pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_casei)) { GList *lpc = NULL; static bool need_init = true; if (need_init) { need_init = false; pcmk__unpack_constraints(rsc->cluster); pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop); } /* We want to set the attribute only on resources explicitly * colocated with this one, so we use rsc->rsc_cons_lhs directly * rather than the with_this_colocations() method. 
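*/

/* Aside: a minimal, self-contained sketch of the loop-guard pattern used
 * below.  The names are illustrative; the real code marks resources with
 * pcmk_rsc_detect_loop and walks the rsc_cons_lhs colocation list instead.
 */
#include <glib.h>
#include <stdbool.h>
#include <stdio.h>

struct example_rsc {
    const char *id;
    bool visited;               // stands in for pcmk_rsc_detect_loop
    GList *dependents;          // struct example_rsc * colocated with this one
};

static void
example_propagate(struct example_rsc *rsc, const char *name, const char *value)
{
    rsc->visited = true;        // mark before recursing so cycles terminate

    for (GList *iter = rsc->dependents; iter != NULL; iter = iter->next) {
        struct example_rsc *dep = iter->data;

        if (!dep->visited) {
            printf("would set %s=%s for %s\n", name, value, dep->id);
            example_propagate(dep, name, value);
        }
    }
}

/* Mark this resource, then update each positively colocated dependent: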
*/ pcmk__set_rsc_flags(rsc, pcmk_rsc_detect_loop); for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; crm_debug("Checking %s %d", cons->id, cons->score); if (!pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop) && (cons->score > 0)) { crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, cons->dependent->id); cli_resource_update_attribute(cons->dependent, cons->dependent->id, NULL, attr_set_type, NULL, attr_name, attr_value, recursive, cib, cib_options, force); } } } } g_list_free(resources); return rc; } // \return Standard Pacemaker return code int cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, int cib_options, gboolean force) { pcmk__output_t *out = rsc->cluster->priv; int rc = pcmk_rc_ok; GList/**/ *resources = NULL; if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, PCMK_XA_ID, pe__const_top_resource(rsc, false)->id, NULL, NULL, NULL, attr_name, NULL); } if (pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_casei)) { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "delete", force); } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { xml_remove_prop(rsc->xml, attr_name); CRM_ASSERT(cib != NULL); rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc->xml, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Deleted attribute: %s", attr_name); } return rc; } else { resources = g_list_append(resources, rsc); } for (GList *iter = resources; iter != NULL; iter = iter->next) { char *lookup_id = NULL; xmlNode *xml_obj = NULL; char *found_attr_id = NULL; const char *rsc_attr_id = attr_id; rsc = (pcmk_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &found_attr_id); switch (rc) { case pcmk_rc_ok: break; case ENXIO: free(lookup_id); rc = pcmk_rc_ok; continue; default: free(lookup_id); g_list_free(resources); return rc; } if (rsc_attr_id == NULL) { rsc_attr_id = found_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); CRM_ASSERT(cib); rc = cib->cmds->remove(cib, PCMK_XE_RESOURCES, xml_obj, cib_options); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { - out->info(out, "Deleted '%s' option: id=%s%s%s%s%s", + out->info(out, "Deleted '%s' option: " PCMK_XA_ID "=%s%s%s%s%s", lookup_id, found_attr_id, ((attr_set == NULL)? "" : " set="), pcmk__s(attr_set, ""), ((attr_name == NULL)? 
"" : " name="), pcmk__s(attr_name, "")); } free(lookup_id); free_xml(xml_obj); free(found_attr_id); } g_list_free(resources); return rc; } // \return Standard Pacemaker return code static int send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, const char *host_uname, const char *rsc_id, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = scheduler->priv; const char *router_node = host_uname; const char *rsc_api_id = NULL; const char *rsc_long_id = NULL; const char *rsc_class = NULL; const char *rsc_provider = NULL; const char *rsc_type = NULL; bool cib_only = false; pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id); if (rsc == NULL) { out->err(out, "Resource %s not found", rsc_id); return ENXIO; } else if (rsc->variant != pcmk_rsc_variant_primitive) { out->err(out, "We can only process primitive resources, not %s", rsc_id); return EINVAL; } rsc_class = crm_element_value(rsc->xml, PCMK_XA_CLASS); rsc_provider = crm_element_value(rsc->xml, PCMK_XA_PROVIDER), rsc_type = crm_element_value(rsc->xml, PCMK_XA_TYPE); if ((rsc_class == NULL) || (rsc_type == NULL)) { out->err(out, "Resource %s does not have a class and type", rsc_id); return EINVAL; } { pcmk_node_t *node = pe_find_node(scheduler->nodes, host_uname); if (node == NULL) { out->err(out, "Node %s not found", host_uname); return pcmk_rc_node_unknown; } if (!(node->details->online)) { if (do_fail_resource) { out->err(out, "Node %s is not online", host_uname); return ENOTCONN; } else { cib_only = true; } } if (!cib_only && pcmk__is_pacemaker_remote_node(node)) { node = pcmk__current_node(node->details->remote_rsc); if (node == NULL) { out->err(out, "No cluster connection to Pacemaker Remote node %s detected", host_uname); return ENOTCONN; } router_node = node->details->uname; } } if (rsc->clone_name) { rsc_api_id = rsc->clone_name; rsc_long_id = rsc->id; } else { rsc_api_id = rsc->id; } if (do_fail_resource) { return pcmk_controld_api_fail(controld_api, host_uname, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type); } else { return pcmk_controld_api_refresh(controld_api, host_uname, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type, cib_only); } } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(const pcmk_resource_t *rsc) { const char *name = (rsc->clone_name? rsc->clone_name : rsc->id); if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) { return strdup(name); } return clone_strip(name); } // \return Standard Pacemaker return code static int clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, const char *rsc_id, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. 
*/ rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler); if (rc != pcmk_rc_ok) { return rc; } crm_trace("Processing %d mainloop inputs", pcmk_controld_api_replies_expected(controld_api)); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", pcmk_controld_api_replies_expected(controld_api)); } return rc; } // \return Standard Pacemaker return code static int clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, const char *node_name, const char *rsc_id, const char *operation, const char *interval_spec, pcmk_scheduler_t *scheduler) { int rc = pcmk_rc_ok; const char *failed_value = NULL; const char *failed_id = NULL; char *interval_ms_s = NULL; GHashTable *rscs = NULL; GHashTableIter iter; /* Create a hash table to use as a set of resources to clean. This lets us * clean each resource only once (per node) regardless of how many failed * operations it has. */ rscs = pcmk__strkey_table(NULL, NULL); // Normalize interval to milliseconds for comparison to history entry if (operation) { guint interval_ms = 0U; pcmk_parse_interval_spec(interval_spec, &interval_ms); interval_ms_s = crm_strdup_printf("%u", interval_ms); } for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->failed); xml_op != NULL; xml_op = pcmk__xe_next(xml_op)) { failed_id = crm_element_value(xml_op, PCMK__XA_RSC_ID); if (failed_id == NULL) { // Malformed history entry, should never happen continue; } // No resource specified means all resources match if (rsc_id) { pcmk_resource_t *fail_rsc = NULL; fail_rsc = pe_find_resource_with_flags(scheduler->resources, failed_id, pcmk_rsc_match_history |pcmk_rsc_match_anon_basename); if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) { continue; } } // Host name should always have been provided by this point failed_value = crm_element_value(xml_op, PCMK_XA_UNAME); if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) { continue; } // No operation specified means all operations match if (operation) { failed_value = crm_element_value(xml_op, PCMK_XA_OPERATION); if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) { continue; } // Interval (if operation was specified) defaults to 0 (not all) failed_value = crm_element_value(xml_op, PCMK_META_INTERVAL); if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) { continue; } } g_hash_table_add(rscs, (gpointer) failed_id); } free(interval_ms_s); g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { crm_debug("Erasing failures of %s on %s", failed_id, node_name); rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler); if (rc != pcmk_rc_ok) { return rc; } } g_hash_table_destroy(rscs); return rc; } // \return Standard Pacemaker return code static int clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, const pcmk_node_t *node) { int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; char *rsc_name = rsc_fail_name(rsc); if (pcmk__is_pacemaker_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } rc = pcmk__attrd_api_clear_failures(NULL, node->details->uname, rsc_name, operation, interval_spec, NULL, attr_options); free(rsc_name); return rc; } // \return Standard Pacemaker return code int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pcmk_scheduler_t *scheduler, gboolean 
force) { pcmk__output_t *out = scheduler->priv; int rc = pcmk_rc_ok; pcmk_node_t *node = NULL; if (rsc == NULL) { return ENXIO; } else if (rsc->children) { for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) { const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data; rc = cli_resource_delete(controld_api, host_uname, child, operation, interval_spec, just_failures, scheduler, force); if (rc != pcmk_rc_ok) { return rc; } } return pcmk_rc_ok; } else if (host_uname == NULL) { GList *lpc = NULL; GList *nodes = g_hash_table_get_values(rsc->known_on); if(nodes == NULL && force) { nodes = pcmk__copy_node_list(scheduler->nodes, false); } else if(nodes == NULL && rsc->exclusive_discover) { GHashTableIter iter; pcmk_node_t *node = NULL; g_hash_table_iter_init(&iter, rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) { if(node->weight >= 0) { nodes = g_list_prepend(nodes, node); } } } else if(nodes == NULL) { nodes = g_hash_table_get_values(rsc->allowed_nodes); } for (lpc = nodes; lpc != NULL; lpc = lpc->next) { node = (pcmk_node_t *) lpc->data; if (node->details->online) { rc = cli_resource_delete(controld_api, node->details->uname, rsc, operation, interval_spec, just_failures, scheduler, force); } if (rc != pcmk_rc_ok) { g_list_free(nodes); return rc; } } g_list_free(nodes); return pcmk_rc_ok; } node = pe_find_node(scheduler->nodes, host_uname); if (node == NULL) { out->err(out, "Unable to clean up %s because node %s not found", rsc->id, host_uname); return ENODEV; } if (!node->details->rsc_discovery_enabled) { out->err(out, "Unable to clean up %s because resource discovery disabled on %s", rsc->id, host_uname); return EOPNOTSUPP; } if (controld_api == NULL) { out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file", rsc->id, host_uname); return pcmk_rc_ok; } rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up %s failures on %s: %s", rsc->id, host_uname, pcmk_rc_str(rc)); return rc; } if (just_failures) { rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation, interval_spec, scheduler); } else { rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler); } if (rc != pcmk_rc_ok) { out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s", rsc->id, host_uname, pcmk_rc_str(rc)); } else { out->info(out, "Cleaned up %s on %s", rsc->id, host_uname); } return rc; } // \return Standard Pacemaker return code int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = scheduler->priv; int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; const char *display_name = node_name? 
node_name : "all nodes"; if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", display_name); return rc; } if (node_name) { pcmk_node_t *node = pe_find_node(scheduler->nodes, node_name); if (node == NULL) { out->err(out, "Unknown node: %s", node_name); return ENXIO; } if (pcmk__is_pacemaker_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } } rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation, interval_spec, NULL, attr_options); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up all failures on %s: %s", display_name, pcmk_rc_str(rc)); return rc; } if (node_name) { rc = clear_rsc_failures(out, controld_api, node_name, NULL, operation, interval_spec, scheduler); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s", node_name, pcmk_rc_str(rc)); return rc; } } else { for (GList *iter = scheduler->nodes; iter; iter = iter->next) { pcmk_node_t *node = (pcmk_node_t *) iter->data; rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL, operation, interval_spec, scheduler); if (rc != pcmk_rc_ok) { out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s", pcmk_rc_str(rc)); return rc; } } } out->info(out, "Cleaned up all resources on %s", display_name); return rc; } static void check_role(resource_checks_t *checks) { const char *role_s = g_hash_table_lookup(checks->rsc->meta, PCMK_META_TARGET_ROLE); if (role_s == NULL) { return; } switch (pcmk_parse_role(role_s)) { case pcmk_role_stopped: checks->flags |= rsc_remain_stopped; break; case pcmk_role_unpromoted: if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags, pcmk_rsc_promotable)) { checks->flags |= rsc_unpromotable; } break; default: break; } } static void check_managed(resource_checks_t *checks) { const char *managed_s = g_hash_table_lookup(checks->rsc->meta, PCMK_META_IS_MANAGED); if ((managed_s != NULL) && !crm_is_true(managed_s)) { checks->flags |= rsc_unmanaged; } } static void check_locked(resource_checks_t *checks) { if (checks->rsc->lock_node != NULL) { checks->flags |= rsc_locked; checks->lock_node = checks->rsc->lock_node->details->uname; } } static bool node_is_unhealthy(pcmk_node_t *node) { switch (pe__health_strategy(node->details->data_set)) { case pcmk__health_strategy_none: break; case pcmk__health_strategy_no_red: if (pe__node_health(node) < 0) { return true; } break; case pcmk__health_strategy_only_green: if (pe__node_health(node) <= 0) { return true; } break; case pcmk__health_strategy_progressive: case pcmk__health_strategy_custom: /* @TODO These are finite scores, possibly with rules, and possibly * combining with other scores, so attributing these as a cause is * nontrivial. 
*/ break; } return false; } static void check_node_health(resource_checks_t *checks, pcmk_node_t *node) { if (node == NULL) { GHashTableIter iter; bool allowed = false; bool all_nodes_unhealthy = true; g_hash_table_iter_init(&iter, checks->rsc->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { allowed = true; if (!node_is_unhealthy(node)) { all_nodes_unhealthy = false; break; } } if (allowed && all_nodes_unhealthy) { checks->flags |= rsc_node_health; } } else if (node_is_unhealthy(node)) { checks->flags |= rsc_node_health; } } int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node) { resource_checks_t checks = { .rsc = rsc }; check_role(&checks); check_managed(&checks); check_locked(&checks); check_node_health(&checks, node); return out->message(out, "resource-check-list", &checks); } // \return Standard Pacemaker return code int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, const char *rsc_id, pcmk_scheduler_t *scheduler) { crm_notice("Failing %s on %s", rsc_id, host_uname); return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler); } static GHashTable * generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node, pcmk_scheduler_t *scheduler) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; char *key = NULL; char *value = NULL; combined = pcmk__strkey_table(free, free); params = pe_rsc_params(rsc, node, scheduler); if (params != NULL) { g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { pcmk__insert_dup(combined, key, value); } } meta = pcmk__strkey_table(free, free); get_meta_attributes(meta, rsc, node, scheduler); if (meta != NULL) { g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } bool resource_is_running_on(pcmk_resource_t *rsc, const char *host) { bool found = true; GList *hIter = NULL; GList *hosts = NULL; if (rsc == NULL) { return false; } rsc->fns->location(rsc, &hosts, TRUE); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { pcmk_node_t *node = (pcmk_node_t *) hIter->data; if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if (host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = false; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = false; } done: g_list_free(hosts); return found; } /*! * \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. 
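*/

/* Aside: a minimal, self-contained sketch of this "flatten groups, keep the
 * active members" pattern.  The member type and the is-active test are
 * illustrative stand-ins for pcmk_resource_t and resource_is_running_on().
 */
#include <glib.h>

struct example_member {
    const char *id;
    gboolean active;
    GList *children;            // struct example_member *, empty for primitives
};

static GList *
example_active_ids(GList *members)
{
    GList *active = NULL;

    for (GList *iter = members; iter != NULL; iter = iter->next) {
        struct example_member *m = iter->data;

        if (m->children != NULL) {          // group: recurse into members
            active = g_list_concat(active,
                                   example_active_ids(m->children));
        } else if (m->active) {             // primitive: keep it if active
            active = g_list_append(active, g_strdup(m->id));
        }
    }
    return active;      // free with g_list_free_full(active, g_free)
}

/* Groups are expanded member by member; primitives are checked directly: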
*/ if (rsc->variant == pcmk_rsc_variant_group) { active = g_list_concat(active, get_active_resources(host, rsc->children)); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(pcmk__output_t *out, GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { out->info(out, "%s%s", tag, (const char *)item->data); } } /*! * \internal * \brief Upgrade XML to latest schema version and use it as scheduler input * * This also updates the scheduler timestamp to the current time. * * \param[in,out] scheduler Scheduler data to update * \param[in,out] xml XML to use as input * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for * scheduler->now. * \todo This follows the example of other callers of cli_config_update() * and returns ENOKEY ("Required key not available") if that fails, * but perhaps pcmk_rc_schema_validation would be better in that case. */ int update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml) { if (cli_config_update(xml, NULL, FALSE) == FALSE) { return ENOKEY; } scheduler->input = *xml; scheduler->now = crm_time_new(NULL); return pcmk_rc_ok; } /*! * \internal * \brief Update scheduler XML input based on a CIB query * * \param[in] scheduler Scheduler data to initialize * \param[in] cib Connection to the CIB manager * * \return Standard Pacemaker return code * \note On success, caller is responsible for freeing memory allocated for * scheduler->input and scheduler->now. 
*/ static int update_scheduler_input_to_cib(pcmk__output_t *out, pcmk_scheduler_t *scheduler, cib_t *cib) { xmlNode *cib_xml_copy = NULL; int rc = pcmk_rc_ok; rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_rc_str(rc), rc); return rc; } rc = update_scheduler_input(scheduler, &cib_xml_copy); if (rc != pcmk_rc_ok) { out->err(out, "Could not upgrade the current CIB XML"); free_xml(cib_xml_copy); return rc; } return rc; } // \return Standard Pacemaker return code static int update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc = pcmk_rc_ok; pcmk__output_t *out = scheduler->priv; pe_reset_working_set(scheduler); pcmk__set_scheduler_flags(scheduler, pcmk_sched_no_counts|pcmk_sched_no_compat); rc = update_scheduler_input_to_cib(out, scheduler, cib); if (rc != pcmk_rc_ok) { return rc; } if(simulate) { bool prev_quiet = false; pid = pcmk__getpid_s(); shadow_cib = cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { out->err(out, "Could not create shadow cib: '%s'", pid); rc = ENXIO; goto done; } rc = pcmk__xml_write_file(scheduler->input, shadow_file, false, NULL); if (rc != pcmk_rc_ok) { out->err(out, "Could not populate shadow cib: %s", pcmk_rc_str(rc)); goto done; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not connect to shadow cib: %s", pcmk_rc_str(rc)); goto done; } pcmk__schedule_actions(scheduler->input, pcmk_sched_no_counts|pcmk_sched_no_compat, scheduler); prev_quiet = out->is_quiet(out); out->quiet = true; pcmk__simulate_transition(scheduler, shadow_cib, NULL); out->quiet = prev_quiet; rc = update_dataset(shadow_cib, scheduler, false); } else { cluster_status(scheduler); } done: // Do not free scheduler->input here, we need rsc->xml to be valid later on cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } /*! * \internal * \brief Find the maximum stop timeout of a resource and its children (if any) * * \param[in,out] rsc Resource to get timeout for * * \return Maximum stop timeout for \p rsc (in milliseconds) */ static int max_rsc_stop_timeout(pcmk_resource_t *rsc) { long long result_ll; int max_delay = 0; xmlNode *config = NULL; GHashTable *meta = NULL; if (rsc == NULL) { return 0; } // If resource is collective, use maximum of its children's stop timeouts if (rsc->children != NULL) { for (GList *iter = rsc->children; iter; iter = iter->next) { pcmk_resource_t *child = iter->data; int delay = max_rsc_stop_timeout(child); if (delay > max_delay) { pcmk__rsc_trace(rsc, "Maximum stop timeout for %s is now %s " "due to %s", rsc->id, pcmk__readable_interval(delay), child->id); max_delay = delay; } } return max_delay; } // Get resource's stop action configuration from CIB config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true); /* Get configured timeout for stop action (fully evaluated for rules, * defaults, etc.). 
* * @TODO This currently ignores node (which might matter for rules) */ meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config); if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT), &result_ll, -1LL) == pcmk_rc_ok) && (result_ll >= 0) && (result_ll <= INT_MAX)) { max_delay = (int) result_ll; } g_hash_table_destroy(meta); return max_delay; } /*! * \internal * \brief Find a reasonable waiting time for stopping any one resource in a list * * \param[in,out] scheduler Scheduler data * \param[in] resources List of names of resources that will be stopped * * \return Rough estimate of a reasonable time to wait (in seconds) to stop any * one resource in \p resources * \note This estimate is very rough, simply the maximum stop timeout of all * given resources and their children, plus a small fudge factor. It does * not account for children that must be stopped in sequence, action * throttling, or any demotions needed. It checks the stop timeout, even * if the resources in question are actually being started. */ static int wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources) { int max_delay = 0; // Find maximum stop timeout in milliseconds for (const GList *item = resources; item != NULL; item = item->next) { pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, (const char *) item->data); int delay = max_rsc_stop_timeout(rsc); if (delay > max_delay) { pcmk__rsc_trace(rsc, "Wait time is now %s due to %s", pcmk__readable_interval(delay), rsc->id); max_delay = delay; } } return (max_delay / 1000) + 5; } #define waiting_for_starts(d, r, h) ((d != NULL) || \ (!resource_is_running_on((r), (h)))) /*! * \internal * \brief Restart a resource (on a particular host if requested). * * \param[in,out] out Output object * \param[in,out] rsc The resource to restart * \param[in] node Node to restart resource on (NULL for all) * \param[in] move_lifetime If not NULL, how long constraint should * remain in effect (as ISO 8601 string) * \param[in] timeout_ms Consider failed if actions do not complete * in this time (specified in milliseconds, * but a two-second granularity is actually * used; if 0, it will be calculated based on * the resource timeout) * \param[in,out] cib Connection to the CIB manager * \param[in] cib_options Group of enum cib_call_options flags to * use with CIB calls * \param[in] promoted_role_only If true, limit to promoted instances * \param[in] force If true, apply only to requested instance * if part of a collective resource * * \return Standard Pacemaker return code (exits on certain failures) */ int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc, const pcmk_node_t *node, const char *move_lifetime, int timeout_ms, cib_t *cib, int cib_options, gboolean promoted_role_only, gboolean force) { int rc = pcmk_rc_ok; int lpc = 0; int before = 0; int step_timeout_s = 0; int sleep_interval = 2; int timeout = timeout_ms / 1000; bool stop_via_ban = false; char *rsc_id = NULL; char *lookup_id = NULL; char *orig_target_role = NULL; GList *list_delta = NULL; GList *target_active = NULL; GList *current_active = NULL; GList *restart_target_active = NULL; pcmk_scheduler_t *scheduler = NULL; pcmk_resource_t *parent = uber_parent(rsc); bool running = false; const char *id = rsc->clone_name ? rsc->clone_name : rsc->id; const char *host = node ? node->details->uname : NULL; /* If the implicit resource or primitive resource of a bundle is given, operate on the * bundle itself instead. 
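*/

/* Aside: the stop and start phases further below both follow the same
 * timed-polling shape.  A self-contained sketch of that loop (the condition
 * callback and the fixed two-second step are illustrative; without --timeout
 * the real code recomputes the step from the resources' stop timeouts):
 */
#include <stdbool.h>
#include <unistd.h>

static bool
example_poll_until(bool (*done)(void *), void *data, int timeout_s)
{
    const int step_s = 2;       // mirrors sleep_interval above

    while (!done(data)) {
        if (timeout_s <= 0) {
            return false;       // caller reports which items remain
        }
        sleep(step_s);
        timeout_s -= step_s;
    }
    return true;
}

/* Bundle members are redirected to the bundle itself, as noted above: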
*/ if (pcmk__is_bundled(rsc)) { rsc = parent->parent; } running = resource_is_running_on(rsc, host); if (pcmk__is_clone(parent) && !running) { if (pcmk__is_unique_clone(parent)) { lookup_id = strdup(rsc->id); } else { lookup_id = clone_strip(rsc->id); } rsc = parent->fns->find_rsc(parent, lookup_id, node, pcmk_rsc_match_basename |pcmk_rsc_match_current_node); free(lookup_id); running = resource_is_running_on(rsc, host); } if (!running) { if (host) { out->err(out, "%s is not running on %s and so cannot be restarted", id, host); } else { out->err(out, "%s is not running anywhere and so cannot be restarted", id); } return ENXIO; } if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) { out->err(out, "Unmanaged resources cannot be restarted."); return EAGAIN; } rsc_id = strdup(rsc->id); if (pcmk__is_unique_clone(parent)) { lookup_id = strdup(rsc->id); } else { lookup_id = clone_strip(rsc->id); } if (host) { if (pcmk__is_clone(rsc) || pe_bundle_replicas(rsc)) { stop_via_ban = true; } else if (pcmk__is_clone(parent)) { stop_via_ban = true; free(lookup_id); lookup_id = strdup(parent->id); } } /* grab full cib determine originally active resources disable or ban poll cib and watch for affected resources to get stopped without --timeout, calculate the stop timeout for each step and wait for that if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down if everything stopped, re-enable or un-ban poll cib and watch for affected resources to get started without --timeout, calculate the start timeout for each step and wait for that if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up report success Optimizations: - use constraints to determine ordered list of affected resources - Allow a --no-deps option (aka. --force-restart) */ scheduler = pe_new_working_set(); if (scheduler == NULL) { rc = errno; out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc)); goto done; } scheduler->priv = out; rc = update_dataset(cib, scheduler, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc); goto done; } restart_target_active = get_active_resources(host, scheduler->resources); current_active = get_active_resources(host, scheduler->resources); dump_list(current_active, "Origin"); if (stop_via_ban) { /* Stop the clone or bundle instance by banning it from the host */ out->quiet = true; rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib, cib_options, promoted_role_only, PCMK__ROLE_PROMOTED); } else { /* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped. * Remember any existing PCMK_META_TARGET_ROLE so we can restore it * later (though it only makes any difference if it's Unpromoted). 
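*/

/* Aside: the save/restore dance around PCMK_META_TARGET_ROLE, reduced to a
 * self-contained sketch.  A GHashTable created with
 * g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free) stands in
 * for the resource's meta-attributes in the CIB.
 */
#include <glib.h>

static void
example_stop_then_restore(GHashTable *meta)
{
    // Remember any existing target-role (may be NULL)
    gchar *orig = g_strdup(g_hash_table_lookup(meta, "target-role"));

    // Stop: force the role to Stopped
    g_hash_table_insert(meta, g_strdup("target-role"), g_strdup("Stopped"));

    /* ... wait for everything to stop, then bring it back ... */

    if (orig != NULL) {
        // Restore whatever was configured before (e.g. "Unpromoted")
        g_hash_table_insert(meta, g_strdup("target-role"), orig);
    } else {
        // Nothing was configured before, so drop the attribute entirely
        g_hash_table_remove(meta, "target-role");
    }
}

/* Remember any existing target-role, then stop by setting it to Stopped: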
    scheduler = pe_new_working_set();
    if (scheduler == NULL) {
        rc = errno;
        out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
        goto done;
    }

    scheduler->priv = out;
    rc = update_dataset(cib, scheduler, false);

    if(rc != pcmk_rc_ok) {
        out->err(out, "Could not get new resource list: %s (%d)",
                 pcmk_rc_str(rc), rc);
        goto done;
    }

    restart_target_active = get_active_resources(host, scheduler->resources);
    current_active = get_active_resources(host, scheduler->resources);

    dump_list(current_active, "Origin");

    if (stop_via_ban) {
        /* Stop the clone or bundle instance by banning it from the host */
        out->quiet = true;
        rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
                              cib_options, promoted_role_only,
                              PCMK__ROLE_PROMOTED);
    } else {
        /* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped.
         * Remember any existing PCMK_META_TARGET_ROLE so we can restore it
         * later (though it only makes any difference if it's Unpromoted).
         */
        find_resource_attr(out, cib, PCMK_XA_VALUE, lookup_id, NULL, NULL, NULL,
                           PCMK_META_TARGET_ROLE, &orig_target_role);
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
                                           PCMK_XE_META_ATTRIBUTES, NULL,
                                           PCMK_META_TARGET_ROLE,
                                           PCMK_ACTION_STOPPED, FALSE, cib,
                                           cib_options, force);
    }

    if(rc != pcmk_rc_ok) {
        out->err(out, "Could not set " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
                 rsc_id, pcmk_rc_str(rc), rc);
        if (current_active != NULL) {
            g_list_free_full(current_active, free);
            current_active = NULL;
        }
        if (restart_target_active != NULL) {
            g_list_free_full(restart_target_active, free);
            restart_target_active = NULL;
        }
        goto done;
    }

    rc = update_dataset(cib, scheduler, true);
    if(rc != pcmk_rc_ok) {
        out->err(out, "Could not determine which resources would be stopped");
        goto failure;
    }

    target_active = get_active_resources(host, scheduler->resources);
    dump_list(target_active, "Target");

    list_delta = pcmk__subtract_lists(current_active, target_active,
                                      (GCompareFunc) strcmp);
    out->info(out, "Waiting for %d resources to stop:",
              g_list_length(list_delta));
    display_list(out, list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
    while (list_delta != NULL) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = wait_time_estimate(scheduler, list_delta)
                             / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
            sleep(sleep_interval);
            if(timeout) {
                timeout -= sleep_interval;
                crm_trace("%ds remaining", timeout);
            }
            rc = update_dataset(cib, scheduler, FALSE);
            if(rc != pcmk_rc_ok) {
                out->err(out,
                         "Could not determine which resources were stopped");
                goto failure;
            }

            if (current_active != NULL) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(host, scheduler->resources);

            g_list_free(list_delta);
            list_delta = pcmk__subtract_lists(current_active, target_active,
                                              (GCompareFunc) strcmp);

            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        crm_trace("%d (was %d) resources remaining",
                  g_list_length(list_delta), before);
        if(before == g_list_length(list_delta)) {
            /* aborted during stop phase, print the contents of list_delta */
            out->err(out,
                     "Could not complete shutdown of %s, %d resources remaining",
                     rsc_id, g_list_length(list_delta));
            display_list(out, list_delta, " * ");
            rc = ETIME;
            goto failure;
        }
    }

    if (stop_via_ban) {
        rc = cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true,
                                force);

    } else if (orig_target_role) {
        rc = cli_resource_update_attribute(rsc, rsc_id, NULL,
                                           PCMK_XE_META_ATTRIBUTES, NULL,
                                           PCMK_META_TARGET_ROLE,
                                           orig_target_role, FALSE, cib,
                                           cib_options, force);
        free(orig_target_role);
        orig_target_role = NULL;

    } else {
        rc = cli_resource_delete_attribute(rsc, rsc_id, NULL,
                                           PCMK_XE_META_ATTRIBUTES, NULL,
                                           PCMK_META_TARGET_ROLE, cib,
                                           cib_options, force);
    }

    if(rc != pcmk_rc_ok) {
        out->err(out,
                 "Could not unset " PCMK_META_TARGET_ROLE " for %s: %s (%d)",
                 rsc_id, pcmk_rc_str(rc), rc);
        goto done;
    }

    if (target_active != NULL) {
        g_list_free_full(target_active, free);
    }
    target_active = restart_target_active;

    list_delta = pcmk__subtract_lists(target_active, current_active,
                                      (GCompareFunc) strcmp);
    out->info(out, "Waiting for %d resources to start again:",
              g_list_length(list_delta));
    display_list(out, list_delta, " * ");

    step_timeout_s = timeout / sleep_interval;
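    /* Start phase: poll until everything that went away has come back and,
     * when a specific node was requested, until the resource is running there.
     * Each pass refreshes the resource state from the CIB and recomputes the
     * delta; a pass that makes no progress is treated as a timeout.
     */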
    while (waiting_for_starts(list_delta, rsc, host)) {
        before = g_list_length(list_delta);
        if(timeout_ms == 0) {
            step_timeout_s = wait_time_estimate(scheduler, list_delta)
                             / sleep_interval;
        }

        /* We probably don't need the entire step timeout */
        for (lpc = 0;
             (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host);
             lpc++) {

            sleep(sleep_interval);
            if(timeout) {
                timeout -= sleep_interval;
                crm_trace("%ds remaining", timeout);
            }

            rc = update_dataset(cib, scheduler, false);
            if(rc != pcmk_rc_ok) {
                out->err(out,
                         "Could not determine which resources were started");
                goto failure;
            }

            /* It's OK if dependent resources moved to a different node,
             * so we check active resources on all nodes.
             */
            if (current_active != NULL) {
                g_list_free_full(current_active, free);
            }
            current_active = get_active_resources(NULL, scheduler->resources);

            g_list_free(list_delta);
            list_delta = pcmk__subtract_lists(target_active, current_active,
                                              (GCompareFunc) strcmp);

            dump_list(current_active, "Current");
            dump_list(list_delta, "Delta");
        }

        if(before == g_list_length(list_delta)) {
            /* aborted during start phase, print the contents of list_delta */
            out->err(out,
                     "Could not complete restart of %s, %d resources remaining",
                     rsc_id, g_list_length(list_delta));
            display_list(out, list_delta, " * ");
            rc = ETIME;
            goto failure;
        }
    }

    rc = pcmk_rc_ok;
    goto done;

failure:
    if (stop_via_ban) {
        cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true,
                           force);

    } else if (orig_target_role) {
        cli_resource_update_attribute(rsc, rsc_id, NULL,
                                      PCMK_XE_META_ATTRIBUTES, NULL,
                                      PCMK_META_TARGET_ROLE, orig_target_role,
                                      FALSE, cib, cib_options, force);
        free(orig_target_role);

    } else {
        cli_resource_delete_attribute(rsc, rsc_id, NULL,
                                      PCMK_XE_META_ATTRIBUTES, NULL,
                                      PCMK_META_TARGET_ROLE, cib, cib_options,
                                      force);
    }

done:
    if (list_delta != NULL) {
        g_list_free(list_delta);
    }
    if (current_active != NULL) {
        g_list_free_full(current_active, free);
    }
    if (target_active != NULL && (target_active != restart_target_active)) {
        g_list_free_full(target_active, free);
    }
    if (restart_target_active != NULL) {
        g_list_free_full(restart_target_active, free);
    }
    free(rsc_id);
    free(lookup_id);
    pe_free_working_set(scheduler);
    return rc;
}

static inline bool
action_is_pending(const pcmk_action_t *action)
{
    if (pcmk_any_flags_set(action->flags,
                           pcmk_action_optional|pcmk_action_pseudo)
        || !pcmk_is_set(action->flags, pcmk_action_runnable)
        || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
        return false;
    }
    return true;
}

/*!
 * \internal
 * \brief Check whether any actions in a list are pending
 *
 * \param[in] actions  List of actions to check
 *
 * \return true if any actions in the list are pending, otherwise false
 */
static bool
actions_are_pending(const GList *actions)
{
    for (const GList *action = actions; action != NULL; action = action->next) {
        const pcmk_action_t *a = (const pcmk_action_t *) action->data;

        if (action_is_pending(a)) {
            crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
            return true;
        }
    }
    return false;
}

static void
print_pending_actions(pcmk__output_t *out, GList *actions)
{
    GList *action;

    out->info(out, "Pending actions:");
    for (action = actions; action != NULL; action = action->next) {
        pcmk_action_t *a = (pcmk_action_t *) action->data;

        if (!action_is_pending(a)) {
            continue;
        }

        if (a->node) {
            out->info(out, "\tAction %d: %s\ton %s",
                      a->id, a->uuid, pcmk__node_name(a->node));
        } else {
            out->info(out, "\tAction %d: %s", a->id, a->uuid);
        }
    }
}

/* For --wait, timeout (in seconds) to use if caller doesn't specify one */
#define WAIT_DEFAULT_TIMEOUT_S (60 * 60)

/* For --wait, how long to sleep between cluster state checks */
#define WAIT_SLEEP_S (2)
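/* With these defaults, a bare "crm_resource --wait" checks the cluster state
 * every two seconds and gives up after one hour unless the caller supplies a
 * timeout of its own.
 */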
/*!
 * \internal
 * \brief Wait until all pending cluster actions are complete
 *
 * This waits until either the CIB's transition graph is idle or a timeout is
 * reached.
 *
 * \param[in,out] out         Output object
 * \param[in]     timeout_ms  Consider failed if actions do not complete in
 *                            this time (specified in milliseconds, but
 *                            one-second granularity is actually used; if 0, a
 *                            default will be used)
 * \param[in,out] cib         Connection to the CIB manager
 *
 * \return Standard Pacemaker return code
 */
int
wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
{
    pcmk_scheduler_t *scheduler = NULL;
    xmlXPathObjectPtr search;
    int rc = pcmk_rc_ok;
    bool pending_unknown_state_resources;
    int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
    time_t expire_time = time(NULL) + timeout_s;
    time_t time_diff;
    bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
    char *xpath = NULL;

    scheduler = pe_new_working_set();
    if (scheduler == NULL) {
        return ENOMEM;
    }

    xpath = crm_strdup_printf("/" PCMK_XE_CIB "/" PCMK_XE_STATUS
                              "/" PCMK__XE_NODE_STATE "/" PCMK__XE_LRM
                              "/" PCMK__XE_LRM_RESOURCES
                              "/" PCMK__XE_LRM_RESOURCE
                              "/" PCMK__XE_LRM_RSC_OP
                              "[@" PCMK__XA_RC_CODE "='%d']",
                              PCMK_OCF_UNKNOWN);

    do {
        /* Abort if timeout is reached */
        time_diff = expire_time - time(NULL);

        if (time_diff <= 0) {
            print_pending_actions(out, scheduler->actions);
            rc = ETIME;
            break;
        }

        crm_info("Waiting up to %lld seconds for cluster actions to complete",
                 (long long) time_diff);

        if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
            sleep(WAIT_SLEEP_S);
        }

        /* Get latest transition graph */
        pe_reset_working_set(scheduler);
        rc = update_scheduler_input_to_cib(out, scheduler, cib);
        if (rc != pcmk_rc_ok) {
            break;
        }
        pcmk__schedule_actions(scheduler->input,
                               pcmk_sched_no_counts|pcmk_sched_no_compat,
                               scheduler);

        if (!printed_version_warning) {
            /* If the DC has a different version than the local node, the two
             * could come to different conclusions about what actions need to be
             * done. Warn the user in this case.
             *
             * @TODO A possible long-term solution would be to reimplement the
             * wait as a new controller operation that would be forwarded to the
             * DC. However, that would have potential problems of its own.
             */
            const char *dc_version = g_hash_table_lookup(scheduler->config_hash,
                                                         PCMK_OPT_DC_VERSION);

            if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION,
                              pcmk__str_casei)) {
                out->info(out,
                          "warning: wait option may not work properly in "
                          "mixed-version cluster");
                printed_version_warning = true;
            }
        }

        search = xpath_search(scheduler->input, xpath);
        pending_unknown_state_resources = (numXpathResults(search) > 0);
        freeXpathObject(search);
    } while (actions_are_pending(scheduler->actions)
             || pending_unknown_state_resources);

    pe_free_working_set(scheduler);
    free(xpath);
    return rc;
}

static const char *
get_action(const char *rsc_action)
{
    const char *action = NULL;

    if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
        action = PCMK_ACTION_VALIDATE_ALL;

    } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
        action = PCMK_ACTION_MONITOR;

    } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
                                    "force-demote", "force-promote", NULL)) {
        action = rsc_action+6;

    } else {
        action = rsc_action;
    }

    return action;
}
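/* For example, get_action() maps "force-start" to "start" and "force-stop" to
 * "stop" (by skipping the six-character "force-" prefix), while "validate" and
 * "force-check" map to the agent's validate-all and monitor actions.
 */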
/*!
 * \brief Set up environment variables as expected by resource agents
 *
 * When the cluster executes resource agents, it adds certain environment
 * variables (directly or via resource meta-attributes) expected by some
 * resource agents. Add the essential ones that many resource agents expect, so
 * the behavior is the same for command-line execution.
 *
 * \param[in,out] params       Resource parameters that will be passed to agent
 * \param[in]     timeout_ms   Action timeout (in milliseconds)
 * \param[in]     check_level  OCF check level
 * \param[in]     verbosity    Verbosity level
 */
static void
set_agent_environment(GHashTable *params, int timeout_ms, int check_level,
                      int verbosity)
{
    g_hash_table_insert(params, crm_meta_name(PCMK_META_TIMEOUT),
                        crm_strdup_printf("%d", timeout_ms));

    pcmk__insert_dup(params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);

    if (check_level >= 0) {
        char *level = crm_strdup_printf("%d", check_level);

        setenv("OCF_CHECK_LEVEL", level, 1);
        free(level);
    }

    pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
    if (verbosity > 1) {
        setenv("OCF_TRACE_RA", "1", 1);
    }

    /* A resource agent using the standard ocf-shellfuncs library will not print
     * messages to stderr if it doesn't have a controlling terminal (e.g. if
     * crm_resource is called via script or ssh). This forces it to do so.
     */
    setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
}

/*!
 * \internal
 * \brief Apply command-line overrides to resource parameters
 *
 * \param[in,out] params     Parameters to be passed to agent
 * \param[in]     overrides  Parameters to override (or NULL if none)
 */
static void
apply_overrides(GHashTable *params, GHashTable *overrides)
{
    if (overrides != NULL) {
        GHashTableIter iter;
        char *name = NULL;
        char *value = NULL;

        g_hash_table_iter_init(&iter, overrides);
        while (g_hash_table_iter_next(&iter, (gpointer *) &name,
                                      (gpointer *) &value)) {
            pcmk__insert_dup(params, name, value);
        }
    }
}
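/* The force-* execution path below is: fill in the agent environment, apply
 * any caller-supplied parameter overrides, run the agent action synchronously
 * via the services library, then map the result to an OCF exit code for
 * consistent reporting.
 */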
crm_exit_t
cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                 const char *rsc_class, const char *rsc_prov,
                                 const char *rsc_type, const char *rsc_action,
                                 GHashTable *params, GHashTable *override_hash,
                                 int timeout_ms, int resource_verbose,
                                 gboolean force, int check_level)
{
    const char *class = rsc_class;
    const char *action = get_action(rsc_action);
    crm_exit_t exit_code = CRM_EX_OK;
    svc_action_t *op = NULL;

    // If no timeout was provided, use the same default as the cluster
    if (timeout_ms == 0) {
        timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
    }

    set_agent_environment(params, timeout_ms, check_level, resource_verbose);
    apply_overrides(params, override_hash);

    op = services__create_resource_action(rsc_name? rsc_name : "test",
                                          rsc_class, rsc_prov, rsc_type, action,
                                          0, timeout_ms, params, 0);
    if (op == NULL) {
        out->err(out, "Could not execute %s using %s%s%s:%s: %s",
                 action, rsc_class, (rsc_prov? ":" : ""),
                 (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
        g_hash_table_destroy(params);
        return CRM_EX_OSERR;
    }

    if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
        class = resources_find_service_class(rsc_type);
    }
    if (!pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_OCF,
                              PCMK_RESOURCE_CLASS_LSB, NULL)) {
        services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
                                "Manual execution of the %s standard is "
                                "unsupported", pcmk__s(class, "unspecified"));
    }

    if (op->rc != PCMK_OCF_UNKNOWN) {
        exit_code = op->rc;
        goto done;
    }

    services_action_sync(op);

    // Map results to OCF codes for consistent reporting to user
    {
        enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);

        // Cast variable instead of function return to keep compilers happy
        exit_code = (crm_exit_t) ocf_code;
    }

done:
    out->message(out, "resource-agent-action", resource_verbose, rsc_class,
                 rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
                 exit_code, op->status, services__exit_reason(op),
                 op->stdout_data, op->stderr_data);
    services_action_free(op);
    return exit_code;
}

crm_exit_t
cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
                     const char *rsc_action, GHashTable *override_hash,
                     int timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler,
                     int resource_verbose, gboolean force, int check_level)
{
    pcmk__output_t *out = scheduler->priv;
    crm_exit_t exit_code = CRM_EX_OK;
    const char *rid = NULL;
    const char *rtype = NULL;
    const char *rprov = NULL;
    const char *rclass = NULL;
    GHashTable *params = NULL;

    if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                             "force-promote", NULL)) {
        if (pcmk__is_clone(rsc)) {
            GList *nodes = cli_resource_search(rsc, requested_name, scheduler);

            if(nodes != NULL && force == FALSE) {
                out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                         rsc_action, rsc->id);
                out->err(out,
                         "Try setting " PCMK_META_TARGET_ROLE "=" PCMK__ROLE_STOPPED
                         " first or specifying the force option");
                return CRM_EX_UNSAFE;
            }

            g_list_free_full(nodes, free);
        }
    }

    if (pcmk__is_clone(rsc)) {
        /* Grab the first child resource in the hope it's not a group */
        rsc = rsc->children->data;
    }

    if (rsc->variant == pcmk_rsc_variant_group) {
        out->err(out, "Sorry, the %s option doesn't support group resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;
    } else if (pcmk__is_bundled(rsc)) {
        out->err(out, "Sorry, the %s option doesn't support bundled resources",
                 rsc_action);
        return CRM_EX_UNIMPLEMENT_FEATURE;
    }

    rclass = crm_element_value(rsc->xml, PCMK_XA_CLASS);
    rprov = crm_element_value(rsc->xml, PCMK_XA_PROVIDER);
    rtype = crm_element_value(rsc->xml, PCMK_XA_TYPE);
    params = generate_resource_params(rsc, NULL /* @TODO use local node */,
                                      scheduler);

    if (timeout_ms == 0) {
        timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action),
                                               scheduler);
    }
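    /* An anonymous clone instance is executed under the name the user asked
     * for rather than its internal child ID (see the assignment below).
     */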
    rid = pcmk__is_anonymous_clone(rsc->parent)? requested_name : rsc->id;

    exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype,
                                                 rsc_action, params,
                                                 override_hash, timeout_ms,
                                                 resource_verbose, force,
                                                 check_level);
    return exit_code;
}

// \return Standard Pacemaker return code
int
cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
                  const char *host_name, const char *move_lifetime, cib_t *cib,
                  int cib_options, pcmk_scheduler_t *scheduler,
                  gboolean promoted_role_only, gboolean force)
{
    pcmk__output_t *out = scheduler->priv;
    int rc = pcmk_rc_ok;
    unsigned int count = 0;
    pcmk_node_t *current = NULL;
    pcmk_node_t *dest = pe_find_node(scheduler->nodes, host_name);
    bool cur_is_dest = false;

    if (dest == NULL) {
        return pcmk_rc_node_unknown;
    }

    if (promoted_role_only && !pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
        const pcmk_resource_t *p = pe__const_top_resource(rsc, false);

        if (pcmk_is_set(p->flags, pcmk_rsc_promotable)) {
            out->info(out, "Using parent '%s' for move instead of '%s'.",
                      rsc->id, rsc_id);
            rsc_id = p->id;
            rsc = p;

        } else {
            out->info(out, "Ignoring --promoted option: %s is not promotable",
                      rsc_id);
            promoted_role_only = FALSE;
        }
    }

    current = pe__find_active_requires(rsc, &count);

    if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
        unsigned int promoted_count = 0;
        pcmk_node_t *promoted_node = NULL;

        for (const GList *iter = rsc->children; iter; iter = iter->next) {
            const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
            enum rsc_role_e child_role = child->fns->state(child, TRUE);

            if (child_role == pcmk_role_promoted) {
                rsc = child;
                promoted_node = pcmk__current_node(child);
                promoted_count++;
            }
        }
        if (promoted_role_only || (promoted_count != 0)) {
            count = promoted_count;
            current = promoted_node;
        }
    }

    if (count > 1) {
        if (pcmk__is_clone(rsc)) {
            current = NULL;
        } else {
            return pcmk_rc_multiple;
        }
    }

    if (pcmk__same_node(current, dest)) {
        cur_is_dest = true;
        if (force) {
            crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                     rsc_id, promoted_role_only?"promoted":"active",
                     pcmk__node_name(dest));
        } else {
            return pcmk_rc_already;
        }
    }

    /* Clear any previous prefer constraints across all nodes. */
    cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, cib_options, false,
                       force);

    /* Clear any previous ban constraints on 'dest'. */
    cli_resource_clear(rsc_id, dest->details->uname, scheduler->nodes, cib,
                       cib_options, TRUE, force);

    /* Record an explicit preference for 'dest' */
    rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
                             cib, cib_options, promoted_role_only,
                             PCMK__ROLE_PROMOTED);

    crm_trace("%s%s now prefers %s%s",
              rsc->id, (promoted_role_only? " (promoted)" : ""),
              pcmk__node_name(dest), force?"(forced)":"");

    /* Only ban the previous location if the current location is not the
     * destination. It is possible to use -M to enforce a location without
     * regard to where the resource is currently located.
     */
    if (force && !cur_is_dest) {
        /* Ban the original location if possible */
        if(current) {
            (void)cli_resource_ban(out, rsc_id, current->details->uname,
                                   move_lifetime, cib, cib_options,
                                   promoted_role_only, PCMK__ROLE_PROMOTED);

        } else if(count > 1) {
            out->info(out,
                      "Resource '%s' is currently %s in %d locations. "
                      "One may now move to %s",
                      rsc_id, (promoted_role_only? "promoted" : "active"),
                      count, pcmk__node_name(dest));
            out->info(out,
                      "To prevent '%s' from being %s at a specific location, "
                      "specify a node.",
                      rsc_id, (promoted_role_only? "promoted" : "active"));

        } else {
            crm_trace("Not banning %s from its current location: not active",
                      rsc_id);
        }
    }

    return rc;
}