diff --git a/daemons/attrd/attrd_attributes.c b/daemons/attrd/attrd_attributes.c
index 21444112fe..a808caeb8e 100644
--- a/daemons/attrd/attrd_attributes.c
+++ b/daemons/attrd/attrd_attributes.c
@@ -1,284 +1,284 @@
 /*
  * Copyright 2013-2025 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 #include "pacemaker-attrd.h"
 
 static attribute_t *
 attrd_create_attribute(xmlNode *xml)
 {
     int is_private = 0;
     long long dampen = 0;
     const char *name = pcmk__xe_get(xml, PCMK__XA_ATTR_NAME);
     const char *set_type = pcmk__xe_get(xml, PCMK__XA_ATTR_SET_TYPE);
     const char *dampen_s = pcmk__xe_get(xml, PCMK__XA_ATTR_DAMPENING);
     attribute_t *a = NULL;
 
     if (set_type == NULL) {
         set_type = PCMK_XE_INSTANCE_ATTRIBUTES;
     }
 
     /* Set type is meaningful only when writing to the CIB. Private
      * attributes are not written.
      */
     pcmk__xe_get_int(xml, PCMK__XA_ATTR_IS_PRIVATE, &is_private);
     if (!is_private
         && !pcmk__str_any_of(set_type, PCMK_XE_INSTANCE_ATTRIBUTES,
                              PCMK_XE_UTILIZATION, NULL)) {
         crm_warn("Ignoring attribute %s with invalid set type %s",
                  pcmk__s(name, "(unidentified)"), set_type);
         return NULL;
     }
 
     a = pcmk__assert_alloc(1, sizeof(attribute_t));
 
     a->id = pcmk__str_copy(name);
     a->set_type = pcmk__str_copy(set_type);
-    a->set_id = crm_element_value_copy(xml, PCMK__XA_ATTR_SET);
-    a->user = crm_element_value_copy(xml, PCMK__XA_ATTR_USER);
+    a->set_id = pcmk__xe_get_copy(xml, PCMK__XA_ATTR_SET);
+    a->user = pcmk__xe_get_copy(xml, PCMK__XA_ATTR_USER);
     a->values = pcmk__strikey_table(NULL, attrd_free_attribute_value);
 
     if (is_private) {
         attrd_set_attr_flags(a, attrd_attr_is_private);
     }
 
     if (dampen_s != NULL) {
         dampen = crm_get_msec(dampen_s);
     }
 
     if (dampen > 0) {
         a->timeout_ms = (int) QB_MIN(dampen, INT_MAX);
         a->timer = attrd_add_timer(a->id, a->timeout_ms, a);
     } else if (dampen < 0) {
         crm_warn("Ignoring invalid delay %s for attribute %s", dampen_s, a->id);
     }
 
     crm_trace("Created attribute %s with %s write delay and %s CIB user",
               a->id,
               ((dampen > 0)? pcmk__readable_interval(a->timeout_ms) : "no"),
               pcmk__s(a->user, "default"));
 
     g_hash_table_replace(attributes, a->id, a);
     return a;
 }
 
 static int
 attrd_update_dampening(attribute_t *a, xmlNode *xml, const char *attr)
 {
     const char *dvalue = pcmk__xe_get(xml, PCMK__XA_ATTR_DAMPENING);
     long long dampen = 0;
 
     if (dvalue == NULL) {
         crm_warn("Could not update %s: peer did not specify value for delay",
                  attr);
         return EINVAL;
     }
 
     dampen = crm_get_msec(dvalue);
     if (dampen < 0) {
         crm_warn("Could not update %s: invalid delay value %dms (%s)",
                  attr, dampen, dvalue);
         return EINVAL;
     }
 
     if (a->timeout_ms != dampen) {
         mainloop_timer_del(a->timer);
         a->timeout_ms = (int) QB_MIN(dampen, INT_MAX);
         if (dampen > 0) {
             a->timer = attrd_add_timer(attr, a->timeout_ms, a);
             crm_info("Update attribute %s delay to %dms (%s)",
                      attr, dampen, dvalue);
         } else {
             a->timer = NULL;
             crm_info("Update attribute %s to remove delay", attr);
         }
 
         /* If dampening changed, do an immediate write-out,
          * otherwise repeated dampening changes would prevent write-outs
          */
         attrd_write_or_elect_attribute(a);
     }
     return pcmk_rc_ok;
 }
 
 GHashTable *attributes = NULL;
 
 /*!
  * \internal
  * \brief Create an XML representation of an attribute for use in peer messages
  *
  * \param[in,out] parent       Create attribute XML as child element of this
  * \param[in]     a            Attribute to represent
  * \param[in]     v            Attribute value to represent
  * \param[in]     force_write  If true, value should be written even if unchanged
  *
  * \return XML representation of attribute
  */
 xmlNode *
 attrd_add_value_xml(xmlNode *parent, const attribute_t *a,
                     const attribute_value_t *v, bool force_write)
 {
     xmlNode *xml = pcmk__xe_create(parent, __func__);
 
     crm_xml_add(xml, PCMK__XA_ATTR_NAME, a->id);
     crm_xml_add(xml, PCMK__XA_ATTR_SET_TYPE, a->set_type);
     crm_xml_add(xml, PCMK__XA_ATTR_SET, a->set_id);
     crm_xml_add(xml, PCMK__XA_ATTR_USER, a->user);
     crm_xml_add(xml, PCMK__XA_ATTR_HOST, v->nodename);
 
     /* @COMPAT Prior to 2.1.10 and 3.0.1, the node's cluster ID was added
      * instead of its XML ID. For Corosync and Pacemaker Remote nodes, those are
      * the same, but if we ever support node XML IDs that differ from their
      * cluster IDs, we will have to drop support for rolling upgrades from
      * versions before those.
      */
     crm_xml_add(xml, PCMK__XA_ATTR_HOST_ID,
                 attrd_get_node_xml_id(v->nodename));
 
     crm_xml_add(xml, PCMK__XA_ATTR_VALUE, v->current);
     crm_xml_add_int(xml, PCMK__XA_ATTR_DAMPENING,
                     pcmk__timeout_ms2s(a->timeout_ms));
     crm_xml_add_int(xml, PCMK__XA_ATTR_IS_PRIVATE,
                     pcmk_is_set(a->flags, attrd_attr_is_private));
     crm_xml_add_int(xml, PCMK__XA_ATTR_IS_REMOTE,
                     pcmk_is_set(v->flags, attrd_value_remote));
     crm_xml_add_int(xml, PCMK__XA_ATTRD_IS_FORCE_WRITE, force_write);
 
     return xml;
 }
 
 void
 attrd_clear_value_seen(void)
 {
     GHashTableIter aIter;
     GHashTableIter vIter;
     attribute_t *a;
     attribute_value_t *v = NULL;
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
         g_hash_table_iter_init(&vIter, a->values);
         while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) {
             attrd_clear_value_flags(v, attrd_value_from_peer);
         }
     }
 }
 
 attribute_t *
 attrd_populate_attribute(xmlNode *xml, const char *attr)
 {
     attribute_t *a = NULL;
     bool update_both = false;
 
     const char *op = pcmk__xe_get(xml, PCMK_XA_TASK);
 
     // NULL because PCMK__ATTRD_CMD_SYNC_RESPONSE has no PCMK_XA_TASK
     update_both = pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE_BOTH,
                                pcmk__str_null_matches);
 
     // Look up or create attribute entry
     a = g_hash_table_lookup(attributes, attr);
     if (a == NULL) {
         if (update_both
             || pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE, pcmk__str_none)) {
             a = attrd_create_attribute(xml);
             if (a == NULL) {
                 return NULL;
             }
         } else {
             crm_warn("Could not update %s: attribute not found", attr);
             return NULL;
         }
     }
 
     // Update attribute dampening
     if (update_both
         || pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE_DELAY, pcmk__str_none)) {
         int rc = attrd_update_dampening(a, xml, attr);
 
         if (rc != pcmk_rc_ok || !update_both) {
             return NULL;
         }
     }
     return a;
 }
 
 /*!
  * \internal
  * \brief Get the XML ID used to write out an attribute set
  *
  * \param[in] attr           Attribute to get set ID for
  * \param[in] node_state_id  XML ID of node state that attribute value is for
  *
  * \return Newly allocated string with XML ID to use for \p attr set
  */
 char *
 attrd_set_id(const attribute_t *attr, const char *node_state_id)
 {
     char *set_id = NULL;
 
     pcmk__assert((attr != NULL) && (node_state_id != NULL));
 
     if (pcmk__str_empty(attr->set_id)) {
         /* @COMPAT This should really take the set type into account. Currently
          * we use the same XML ID for transient attributes and utilization
          * attributes. It doesn't cause problems because the status section is
          * not limited by the schema in any way, but it's still unfortunate.
          * For backward compatibility reasons, we can't change this.
          */
         set_id = crm_strdup_printf("%s-%s", PCMK_XE_STATUS, node_state_id);
     } else {
         /* @COMPAT When the user specifies a set ID for an attribute, it is the
          * same for every node. That is less than ideal, but again, the schema
          * doesn't enforce anything for the status section. We couldn't change
          * it without allowing the set ID to vary per value rather than per
          * attribute, which would break backward compatibility, pose design
          * challenges, and potentially cause problems in rolling upgrades.
          */
         set_id = pcmk__str_copy(attr->set_id);
     }
     pcmk__xml_sanitize_id(set_id);
     return set_id;
 }
 
 /*!
  * \internal
  * \brief Get the XML ID used to write out an attribute value
  *
  * \param[in] attr           Attribute to get value XML ID for
  * \param[in] node_state_id  UUID of node that attribute value is for
  *
  * \return Newly allocated string with XML ID of \p attr value
  */
 char *
 attrd_nvpair_id(const attribute_t *attr, const char *node_state_id)
 {
     char *nvpair_id = NULL;
 
     if (attr->set_id != NULL) {
         nvpair_id = crm_strdup_printf("%s-%s", attr->set_id, attr->id);
     } else {
         nvpair_id = crm_strdup_printf(PCMK_XE_STATUS "-%s-%s",
                                       node_state_id, attr->id);
     }
     pcmk__xml_sanitize_id(nvpair_id);
     return nvpair_id;
 }
diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c
index fdfb108cd9..960c736fdd 100644
--- a/daemons/attrd/attrd_corosync.c
+++ b/daemons/attrd/attrd_corosync.c
@@ -1,615 +1,615 @@
 /*
  * Copyright 2013-2025 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 
 #include "pacemaker-attrd.h"
 
 static xmlNode *
 attrd_confirmation(int callid)
 {
     xmlNode *node = pcmk__xe_create(NULL, __func__);
 
     crm_xml_add(node, PCMK__XA_T, PCMK__VALUE_ATTRD);
     crm_xml_add(node, PCMK__XA_SRC, pcmk__cluster_local_node_name());
     crm_xml_add(node, PCMK_XA_TASK, PCMK__ATTRD_CMD_CONFIRM);
     crm_xml_add_int(node, PCMK__XA_CALL_ID, callid);
 
     return node;
 }
 
 static void
 attrd_peer_message(pcmk__node_status_t *peer, xmlNode *xml)
 {
     const char *election_op = pcmk__xe_get(xml, PCMK__XA_CRM_TASK);
 
     if (election_op) {
         attrd_handle_election_op(peer, xml);
         return;
     }
 
     if (attrd_shutting_down(false)) {
         /* If we're shutting down, we want to continue responding to election
          * ops as long as we're a cluster member (because our vote may be
          * needed). Ignore all other messages.
          */
         return;
     } else {
         pcmk__request_t request = {
             .ipc_client     = NULL,
             .ipc_id         = 0,
             .ipc_flags      = 0,
             .peer           = peer->name,
             .xml            = xml,
             .call_options   = 0,
             .result         = PCMK__UNKNOWN_RESULT,
         };
 
-        request.op = crm_element_value_copy(request.xml, PCMK_XA_TASK);
+        request.op = pcmk__xe_get_copy(request.xml, PCMK_XA_TASK);
         CRM_CHECK(request.op != NULL, return);
 
         attrd_handle_request(&request);
 
         /* Having finished handling the request, check to see if the originating
          * peer requested confirmation. If so, send that confirmation back now.
          */
         if (pcmk__xe_attr_is_true(xml, PCMK__XA_CONFIRM)
             && !pcmk__str_eq(request.op, PCMK__ATTRD_CMD_CONFIRM,
                              pcmk__str_none)) {
             int callid = 0;
             xmlNode *reply = NULL;
 
             /* Add the confirmation ID for the message we are confirming to the
              * response so the originating peer knows what they're a confirmation
              * for.
*/ pcmk__xe_get_int(xml, PCMK__XA_CALL_ID, &callid); reply = attrd_confirmation(callid); /* And then send the confirmation back to the originating peer. This * ends up right back in this same function (attrd_peer_message) on the * peer where it will have to do something with a PCMK__XA_CONFIRM type * message. */ crm_debug("Sending %s a confirmation", peer->name); attrd_send_message(peer, reply, false); pcmk__xml_free(reply); } pcmk__reset_request(&request); } } static void attrd_cpg_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk__cpg_message_data(handle, nodeid, pid, msg, &from); if(data == NULL) { return; } xml = pcmk__xml_parse(data); if (xml == NULL) { crm_err("Bad message received from %s[%u]: '%.120s'", from, nodeid, data); } else { attrd_peer_message(pcmk__get_node(nodeid, from, NULL, pcmk__node_search_cluster_member), xml); } pcmk__xml_free(xml); free(data); } static void attrd_cpg_destroy(gpointer unused) { if (attrd_shutting_down(false)) { crm_info("Disconnected from Corosync process group"); } else { crm_crit("Lost connection to Corosync process group, shutting down"); attrd_exit_status = CRM_EX_DISCONNECT; attrd_shutdown(0); } } /*! * \internal * \brief Broadcast an update for a single attribute value * * \param[in] a Attribute to broadcast * \param[in] v Attribute value to broadcast */ void attrd_broadcast_value(const attribute_t *a, const attribute_value_t *v) { xmlNode *op = pcmk__xe_create(NULL, PCMK_XE_OP); crm_xml_add(op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); attrd_add_value_xml(op, a, v, false); attrd_send_message(NULL, op, false); pcmk__xml_free(op); } #define state_text(state) pcmk__s((state), "in unknown state") static void attrd_peer_change_cb(enum pcmk__node_update kind, pcmk__node_status_t *peer, const void *data) { bool gone = false; bool is_remote = pcmk_is_set(peer->flags, pcmk__node_status_remote); switch (kind) { case pcmk__node_update_name: crm_debug("%s node %s is now %s", (is_remote? "Remote" : "Cluster"), peer->name, state_text(peer->state)); break; case pcmk__node_update_processes: if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) { gone = true; } crm_debug("Node %s is %s a peer", peer->name, (gone? "no longer" : "now")); break; case pcmk__node_update_state: crm_debug("%s node %s is now %s (was %s)", (is_remote? "Remote" : "Cluster"), peer->name, state_text(peer->state), state_text(data)); if (pcmk__str_eq(peer->state, PCMK_VALUE_MEMBER, pcmk__str_none)) { /* If we're the writer, send new peers a list of all attributes * (unless it's a remote node, which doesn't run its own attrd) */ if (attrd_election_won() && !pcmk_is_set(peer->flags, pcmk__node_status_remote)) { attrd_peer_sync(peer); } } else { // Remove all attribute values associated with lost nodes attrd_peer_remove(peer->name, false, "loss"); gone = true; } break; } // Remove votes from cluster nodes that leave, in case election in progress if (gone && !is_remote) { attrd_remove_voter(peer); attrd_remove_peer_protocol_ver(peer->name); attrd_do_not_expect_from_peer(peer->name); } } #define readable_value(rv_v) pcmk__s((rv_v)->current, "(unset)") #define readable_peer(p) \ (((p) == NULL)? 
"all peers" : pcmk__s((p)->name, "unknown peer")) static void update_attr_on_host(attribute_t *a, const pcmk__node_status_t *peer, const xmlNode *xml, const char *attr, const char *value, const char *host, bool filter) { int is_remote = 0; bool changed = false; attribute_value_t *v = NULL; const char *prev_xml_id = NULL; const char *node_xml_id = pcmk__xe_get(xml, PCMK__XA_ATTR_HOST_ID); // Create entry for value if not already existing v = g_hash_table_lookup(a->values, host); if (v == NULL) { v = pcmk__assert_alloc(1, sizeof(attribute_value_t)); v->nodename = pcmk__str_copy(host); g_hash_table_replace(a->values, v->nodename, v); } /* If update doesn't contain the node XML ID, fall back to any previously * known value (for logging) */ prev_xml_id = attrd_get_node_xml_id(v->nodename); if (node_xml_id == NULL) { node_xml_id = prev_xml_id; } // If value is for a Pacemaker Remote node, remember that pcmk__xe_get_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote); if (is_remote) { attrd_set_value_flags(v, attrd_value_remote); pcmk__assert(pcmk__cluster_lookup_remote_node(host) != NULL); } // Check whether the value changed changed = !pcmk__str_eq(v->current, value, pcmk__str_casei); if (changed && filter && pcmk__str_eq(host, attrd_cluster->priv->node_name, pcmk__str_casei)) { /* Broadcast the local value for an attribute that differs from the * value provided in a peer's attribute synchronization response. This * ensures a node's values for itself take precedence and all peers are * kept in sync. */ v = g_hash_table_lookup(a->values, attrd_cluster->priv->node_name); crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", attr, host, readable_value(v), value, peer->name); attrd_broadcast_value(a, v); } else if (changed) { crm_notice("Setting %s[%s]%s%s: %s -> %s " QB_XS " from %s with %s write delay and node XML ID %s", attr, host, a->set_type ? " in " : "", pcmk__s(a->set_type, ""), readable_value(v), pcmk__s(value, "(unset)"), peer->name, (a->timeout_ms == 0)? "no" : pcmk__readable_interval(a->timeout_ms), pcmk__s(node_xml_id, "unknown")); pcmk__str_update(&v->current, value); attrd_set_attr_flags(a, attrd_attr_changed); if (pcmk__str_eq(host, attrd_cluster->priv->node_name, pcmk__str_casei) && pcmk__str_eq(attr, PCMK__NODE_ATTR_SHUTDOWN, pcmk__str_none)) { if (!pcmk__str_eq(value, "0", pcmk__str_null_matches)) { attrd_set_requesting_shutdown(); } else { attrd_clear_requesting_shutdown(); } } // Write out new value or start dampening timer if (a->timeout_ms && a->timer) { crm_trace("Delaying write of %s %s for dampening", attr, pcmk__readable_interval(a->timeout_ms)); mainloop_timer_start(a->timer); } else { attrd_write_or_elect_attribute(a); } } else { int is_force_write = 0; pcmk__xe_get_int(xml, PCMK__XA_ATTRD_IS_FORCE_WRITE, &is_force_write); if (is_force_write == 1 && a->timeout_ms && a->timer) { /* Save forced writing and set change flag. */ /* The actual attribute is written by Writer after election. 
*/ crm_trace("%s[%s] from %s is unchanged (%s), forcing write", attr, host, peer->name, pcmk__s(value, "unset")); attrd_set_attr_flags(a, attrd_attr_force_write); } else { crm_trace("%s[%s] from %s is unchanged (%s)", attr, host, peer->name, pcmk__s(value, "unset")); } } // This allows us to later detect local values that peer doesn't know about attrd_set_value_flags(v, attrd_value_from_peer); // Remember node's XML ID if we're just learning it if ((node_xml_id != NULL) && !pcmk__str_eq(node_xml_id, prev_xml_id, pcmk__str_none)) { crm_trace("Learned %s[%s] node XML ID is %s (was %s)", a->id, v->nodename, node_xml_id, pcmk__s(prev_xml_id, "unknown")); attrd_set_node_xml_id(v->nodename, node_xml_id); if (attrd_election_won()) { // In case we couldn't write a value missing the XML ID before attrd_write_attributes(attrd_write_changed); } } } static void attrd_peer_update_one(const pcmk__node_status_t *peer, xmlNode *xml, bool filter) { attribute_t *a = NULL; const char *attr = pcmk__xe_get(xml, PCMK__XA_ATTR_NAME); const char *value = pcmk__xe_get(xml, PCMK__XA_ATTR_VALUE); const char *host = pcmk__xe_get(xml, PCMK__XA_ATTR_HOST); if (attr == NULL) { crm_warn("Could not update attribute: peer did not specify name"); return; } a = attrd_populate_attribute(xml, attr); if (a == NULL) { return; } if (host == NULL) { // If no host was specified, update all hosts GHashTableIter vIter; crm_debug("Setting %s for all hosts to %s", attr, value); pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_HOST_ID); g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, (gpointer *) & host, NULL)) { update_attr_on_host(a, peer, xml, attr, value, host, filter); } } else { // Update attribute value for the given host update_attr_on_host(a, peer, xml, attr, value, host, filter); } /* If this is a message from some attrd instance broadcasting its protocol * version, check to see if it's a new minimum version. 
*/ if (pcmk__str_eq(attr, CRM_ATTR_PROTOCOL, pcmk__str_none)) { attrd_update_minimum_protocol_ver(peer->name, value); } } static void broadcast_unseen_local_values(void) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = NULL; g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { if (!pcmk_is_set(v->flags, attrd_value_from_peer) && pcmk__str_eq(v->nodename, attrd_cluster->priv->node_name, pcmk__str_casei)) { crm_trace("* %s[%s]='%s' is local-only", a->id, v->nodename, readable_value(v)); if (sync == NULL) { sync = pcmk__xe_create(NULL, __func__); crm_xml_add(sync, PCMK_XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); } attrd_add_value_xml(sync, a, v, a->timeout_ms && a->timer); } } } if (sync != NULL) { crm_debug("Broadcasting local-only values"); attrd_send_message(NULL, sync, false); pcmk__xml_free(sync); } } int attrd_cluster_connect(void) { int rc = pcmk_rc_ok; attrd_cluster = pcmk_cluster_new(); pcmk_cluster_set_destroy_fn(attrd_cluster, attrd_cpg_destroy); pcmk_cpg_set_deliver_fn(attrd_cluster, attrd_cpg_dispatch); pcmk_cpg_set_confchg_fn(attrd_cluster, pcmk__cpg_confchg_cb); pcmk__cluster_set_status_callback(&attrd_peer_change_cb); rc = pcmk_cluster_connect(attrd_cluster); rc = pcmk_rc2legacy(rc); if (rc != pcmk_ok) { crm_err("Cluster connection failed"); return rc; } return pcmk_ok; } void attrd_peer_clear_failure(pcmk__request_t *request) { xmlNode *xml = request->xml; const char *rsc = pcmk__xe_get(xml, PCMK__XA_ATTR_RESOURCE); const char *host = pcmk__xe_get(xml, PCMK__XA_ATTR_HOST); const char *op = pcmk__xe_get(xml, PCMK__XA_ATTR_CLEAR_OPERATION); const char *interval_spec = pcmk__xe_get(xml, PCMK__XA_ATTR_CLEAR_INTERVAL); guint interval_ms = 0U; char *attr = NULL; GHashTableIter iter; regex_t regex; pcmk__node_status_t *peer = pcmk__get_node(0, request->peer, NULL, pcmk__node_search_cluster_member); pcmk_parse_interval_spec(interval_spec, &interval_ms); if (attrd_failure_regex(®ex, rsc, op, interval_ms) != pcmk_ok) { crm_info("Ignoring invalid request to clear failures for %s", pcmk__s(rsc, "all resources")); return; } crm_xml_add(xml, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); /* Make sure value is not set, so we delete */ pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_VALUE); g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) { if (regexec(®ex, attr, 0, NULL, 0) == 0) { crm_trace("Matched %s when clearing %s", attr, pcmk__s(rsc, "all resources")); crm_xml_add(xml, PCMK__XA_ATTR_NAME, attr); attrd_peer_update(peer, xml, host, false); } } regfree(®ex); } /*! * \internal * \brief Load attributes from a peer sync response * * \param[in] peer Peer that sent sync response * \param[in] peer_won Whether peer is the attribute writer * \param[in,out] xml Request XML */ void attrd_peer_sync_response(const pcmk__node_status_t *peer, bool peer_won, xmlNode *xml) { crm_info("Processing " PCMK__ATTRD_CMD_SYNC_RESPONSE " from %s", peer->name); if (peer_won) { /* Initialize the "seen" flag for all attributes to cleared, so we can * detect attributes that local node has but the writer doesn't. 
*/ attrd_clear_value_seen(); } // Process each attribute update in the sync response for (xmlNode *child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { attrd_peer_update(peer, child, pcmk__xe_get(child, PCMK__XA_ATTR_HOST), true); } if (peer_won) { /* If any attributes are still not marked as seen, the writer doesn't * know about them, so send all peers an update with them. */ broadcast_unseen_local_values(); } } /*! * \internal * \brief Remove all attributes and optionally peer cache entries for a node * * \param[in] host Name of node to purge * \param[in] uncache If true, remove node from peer caches * \param[in] source Who requested removal (only used for logging) */ void attrd_peer_remove(const char *host, bool uncache, const char *source) { attribute_t *a = NULL; GHashTableIter aIter; CRM_CHECK(host != NULL, return); crm_notice("Removing all %s attributes for node %s " QB_XS " %s reaping node from cache", host, source, (uncache? "and" : "without")); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { if(g_hash_table_remove(a->values, host)) { crm_debug("Removed %s[%s] for peer %s", a->id, host, source); } } if (uncache) { pcmk__purge_node_from_cache(host, 0); attrd_forget_node_xml_id(host); } } /*! * \internal * \brief Send all known attributes and values to a peer * * \param[in] peer Peer to send sync to (if NULL, broadcast to all peers) */ void attrd_peer_sync(pcmk__node_status_t *peer) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = pcmk__xe_create(NULL, __func__); crm_xml_add(sync, PCMK_XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { crm_debug("Syncing %s[%s]='%s' to %s", a->id, v->nodename, readable_value(v), readable_peer(peer)); attrd_add_value_xml(sync, a, v, false); } } crm_debug("Syncing values to %s", readable_peer(peer)); attrd_send_message(peer, sync, false); pcmk__xml_free(sync); } void attrd_peer_update(const pcmk__node_status_t *peer, xmlNode *xml, const char *host, bool filter) { bool handle_sync_point = false; CRM_CHECK((peer != NULL) && (xml != NULL), return); if (xml->children != NULL) { for (xmlNode *child = pcmk__xe_first_child(xml, PCMK_XE_OP, NULL, NULL); child != NULL; child = pcmk__xe_next(child, PCMK_XE_OP)) { pcmk__xe_copy_attrs(child, xml, pcmk__xaf_no_overwrite); attrd_peer_update_one(peer, child, filter); if (attrd_request_has_sync_point(child)) { handle_sync_point = true; } } } else { attrd_peer_update_one(peer, xml, filter); if (attrd_request_has_sync_point(xml)) { handle_sync_point = true; } } /* If the update XML specified that the client wanted to wait for a sync * point, process that now. */ if (handle_sync_point) { crm_trace("Hit local sync point for attribute update"); attrd_ack_waitlist_clients(attrd_sync_point_local, xml); } } diff --git a/daemons/attrd/attrd_ipc.c b/daemons/attrd/attrd_ipc.c index bb03c6659d..f88abf90c4 100644 --- a/daemons/attrd/attrd_ipc.c +++ b/daemons/attrd/attrd_ipc.c @@ -1,629 +1,629 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include // PRIu32 #include #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" static qb_ipcs_service_t *ipcs = NULL; /*! * \internal * \brief Build the XML reply to a client query * * \param[in] attr Name of requested attribute * \param[in] host Name of requested host (or NULL for all hosts) * * \return New XML reply * \note Caller is responsible for freeing the resulting XML */ static xmlNode *build_query_reply(const char *attr, const char *host) { xmlNode *reply = pcmk__xe_create(NULL, __func__); attribute_t *a; crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_ATTRD); crm_xml_add(reply, PCMK__XA_SUBT, PCMK__ATTRD_CMD_QUERY); crm_xml_add(reply, PCMK__XA_ATTR_VERSION, ATTRD_PROTOCOL_VERSION); /* If desired attribute exists, add its value(s) to the reply */ a = g_hash_table_lookup(attributes, attr); if (a) { attribute_value_t *v; xmlNode *host_value; crm_xml_add(reply, PCMK__XA_ATTR_NAME, attr); /* Allow caller to use "localhost" to refer to local node */ if (pcmk__str_eq(host, "localhost", pcmk__str_casei)) { host = attrd_cluster->priv->node_name; crm_trace("Mapped localhost to %s", host); } /* If a specific node was requested, add its value */ if (host) { v = g_hash_table_lookup(a->values, host); host_value = pcmk__xe_create(reply, PCMK_XE_NODE); crm_xml_add(host_value, PCMK__XA_ATTR_HOST, host); crm_xml_add(host_value, PCMK__XA_ATTR_VALUE, (v? v->current : NULL)); /* Otherwise, add all nodes' values */ } else { GHashTableIter iter; g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) { host_value = pcmk__xe_create(reply, PCMK_XE_NODE); crm_xml_add(host_value, PCMK__XA_ATTR_HOST, v->nodename); crm_xml_add(host_value, PCMK__XA_ATTR_VALUE, v->current); } } } return reply; } xmlNode * attrd_client_clear_failure(pcmk__request_t *request) { xmlNode *xml = request->xml; const char *rsc, *op, *interval_spec; if (minimum_protocol_version >= 2) { /* Propagate to all peers (including ourselves). * This ends up at attrd_peer_message(). 
*/ attrd_send_message(NULL, xml, false); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } rsc = pcmk__xe_get(xml, PCMK__XA_ATTR_RESOURCE); op = pcmk__xe_get(xml, PCMK__XA_ATTR_CLEAR_OPERATION); interval_spec = pcmk__xe_get(xml, PCMK__XA_ATTR_CLEAR_INTERVAL); /* Map this to an update */ crm_xml_add(xml, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); /* Add regular expression matching desired attributes */ if (rsc) { char *pattern; if (op == NULL) { pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc); } else { guint interval_ms = 0U; pcmk_parse_interval_spec(interval_spec, &interval_ms); pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP, rsc, op, interval_ms); } crm_xml_add(xml, PCMK__XA_ATTR_REGEX, pattern); free(pattern); } else { crm_xml_add(xml, PCMK__XA_ATTR_REGEX, ATTRD_RE_CLEAR_ALL); } /* Make sure attribute and value are not set, so we delete via regex */ pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_NAME); pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_VALUE); return attrd_client_update(request); } xmlNode * attrd_client_peer_remove(pcmk__request_t *request) { xmlNode *xml = request->xml; // Host and ID are not used in combination, rather host has precedence const char *host = pcmk__xe_get(xml, PCMK__XA_ATTR_HOST); char *host_alloc = NULL; attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags); if (host == NULL) { int nodeid = 0; pcmk__xe_get_int(xml, PCMK__XA_ATTR_HOST_ID, &nodeid); if (nodeid > 0) { pcmk__node_status_t *node = NULL; char *host_alloc = NULL; node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_cluster_member); if ((node != NULL) && (node->name != NULL)) { // Use cached name if available host = node->name; } else { // Otherwise ask cluster layer host_alloc = pcmk__cluster_node_name(nodeid); host = host_alloc; } crm_xml_add(xml, PCMK__XA_ATTR_HOST, host); } } if (host) { crm_info("Client %s is requesting all values for %s be removed", pcmk__client_name(request->ipc_client), host); attrd_send_message(NULL, xml, false); /* ends up at attrd_peer_message() */ free(host_alloc); } else { crm_info("Ignoring request by client %s to remove all peer values without specifying peer", pcmk__client_name(request->ipc_client)); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } xmlNode * attrd_client_query(pcmk__request_t *request) { xmlNode *query = request->xml; xmlNode *reply = NULL; const char *attr = NULL; crm_debug("Query arrived from %s", pcmk__client_name(request->ipc_client)); /* Request must specify attribute name to query */ attr = pcmk__xe_get(query, PCMK__XA_ATTR_NAME); if (attr == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Ignoring malformed query from %s (no attribute name given)", pcmk__client_name(request->ipc_client)); return NULL; } /* Build the XML reply */ reply = build_query_reply(attr, pcmk__xe_get(query, PCMK__XA_ATTR_HOST)); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Could not respond to query from %s: could not create XML reply", pcmk__client_name(request->ipc_client)); return NULL; } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } request->ipc_client->request_id = 0; return reply; } xmlNode * attrd_client_refresh(pcmk__request_t *request) { crm_info("Updating all attributes"); attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags); attrd_write_attributes(attrd_write_all|attrd_write_no_delay); pcmk__set_result(&request->result, CRM_EX_OK, 
PCMK_EXEC_DONE, NULL); return NULL; } static void handle_missing_host(xmlNode *xml) { if (pcmk__xe_get(xml, PCMK__XA_ATTR_HOST) == NULL) { crm_trace("Inferring local node %s with XML ID %s", attrd_cluster->priv->node_name, attrd_cluster->priv->node_xml_id); crm_xml_add(xml, PCMK__XA_ATTR_HOST, attrd_cluster->priv->node_name); crm_xml_add(xml, PCMK__XA_ATTR_HOST_ID, attrd_cluster->priv->node_xml_id); } } /* Convert a single IPC message with a regex into one with multiple children, one * for each regex match. */ static int expand_regexes(xmlNode *xml, const char *attr, const char *value, const char *regex) { if (attr == NULL && regex) { bool matched = false; GHashTableIter aIter; regex_t r_patt; crm_debug("Setting %s to %s", regex, value); if (regcomp(&r_patt, regex, REG_EXTENDED|REG_NOSUB)) { return EINVAL; } g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, (gpointer *) & attr, NULL)) { int status = regexec(&r_patt, attr, 0, NULL, 0); if (status == 0) { xmlNode *child = pcmk__xe_create(xml, PCMK_XE_OP); crm_trace("Matched %s with %s", attr, regex); matched = true; /* Copy all the non-conflicting attributes from the parent over, * but remove the regex and replace it with the name. */ pcmk__xe_copy_attrs(child, xml, pcmk__xaf_no_overwrite); pcmk__xe_remove_attr(child, PCMK__XA_ATTR_REGEX); crm_xml_add(child, PCMK__XA_ATTR_NAME, attr); } } regfree(&r_patt); /* Return a code if we never matched anything. This should not be treated * as an error. It indicates there was a regex, and it was a valid regex, * but simply did not match anything and the caller should not continue * doing any regex-related processing. */ if (!matched) { return pcmk_rc_op_unsatisfied; } } else if (attr == NULL) { return pcmk_rc_bad_nvpair; } return pcmk_rc_ok; } static int handle_regexes(pcmk__request_t *request) { xmlNode *xml = request->xml; int rc = pcmk_rc_ok; const char *attr = pcmk__xe_get(xml, PCMK__XA_ATTR_NAME); const char *value = pcmk__xe_get(xml, PCMK__XA_ATTR_VALUE); const char *regex = pcmk__xe_get(xml, PCMK__XA_ATTR_REGEX); rc = expand_regexes(xml, attr, value, regex); if (rc == EINVAL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Bad regex '%s' for update from client %s", regex, pcmk__client_name(request->ipc_client)); } else if (rc == pcmk_rc_bad_nvpair) { crm_err("Update request did not specify attribute or regular expression"); pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Client %s update request did not specify attribute or regular expression", pcmk__client_name(request->ipc_client)); } return rc; } static int handle_value_expansion(const char **value, xmlNode *xml, const char *op, const char *attr) { attribute_t *a = g_hash_table_lookup(attributes, attr); if (a == NULL && pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE_DELAY, pcmk__str_none)) { return EINVAL; } if (*value && attrd_value_needs_expansion(*value)) { int int_value; attribute_value_t *v = NULL; if (a) { const char *host = pcmk__xe_get(xml, PCMK__XA_ATTR_HOST); v = g_hash_table_lookup(a->values, host); } int_value = attrd_expand_value(*value, (v? 
v->current : NULL)); crm_info("Expanded %s=%s to %d", attr, *value, int_value); crm_xml_add_int(xml, PCMK__XA_ATTR_VALUE, int_value); /* Replacing the value frees the previous memory, so re-query it */ *value = pcmk__xe_get(xml, PCMK__XA_ATTR_VALUE); } return pcmk_rc_ok; } static void send_update_msg_to_cluster(pcmk__request_t *request, xmlNode *xml) { if (pcmk__str_eq(attrd_request_sync_point(xml), PCMK__VALUE_CLUSTER, pcmk__str_none)) { /* The client is waiting on the cluster-wide sync point. In this case, * the response ACK is not sent until this attrd broadcasts the update * and receives its own confirmation back from all peers. */ attrd_expect_confirmations(request, attrd_cluster_sync_point_update); attrd_send_message(NULL, xml, true); /* ends up at attrd_peer_message() */ } else { /* The client is either waiting on the local sync point or was not * waiting on any sync point at all. For the local sync point, the * response ACK is sent in attrd_peer_update. For clients not * waiting on any sync point, the response ACK is sent in * handle_update_request immediately before this function was called. */ attrd_send_message(NULL, xml, false); /* ends up at attrd_peer_message() */ } } static int send_child_update(xmlNode *child, void *data) { pcmk__request_t *request = (pcmk__request_t *) data; /* Calling pcmk__set_result is handled by one of these calls to * attrd_client_update, so no need to do it again here. */ request->xml = child; attrd_client_update(request); return pcmk_rc_ok; } xmlNode * attrd_client_update(pcmk__request_t *request) { xmlNode *xml = NULL; const char *attr, *value, *regex; CRM_CHECK((request != NULL) && (request->xml != NULL), return NULL); xml = request->xml; /* If the message has children, that means it is a message from a newer * client that supports sending multiple operations at a time. There are * two ways we can handle that. */ if (xml->children != NULL) { if (ATTRD_SUPPORTS_MULTI_MESSAGE(minimum_protocol_version)) { /* First, if all peers support a certain protocol version, we can * just broadcast the big message and they'll handle it. However, * we also need to apply all the transformations in this function * to the children since they don't happen anywhere else. */ for (xmlNode *child = pcmk__xe_first_child(xml, PCMK_XE_OP, NULL, NULL); child != NULL; child = pcmk__xe_next(child, PCMK_XE_OP)) { attr = pcmk__xe_get(child, PCMK__XA_ATTR_NAME); value = pcmk__xe_get(child, PCMK__XA_ATTR_VALUE); handle_missing_host(child); if (handle_value_expansion(&value, child, request->op, attr) == EINVAL) { pcmk__format_result(&request->result, CRM_EX_NOSUCH, PCMK_EXEC_ERROR, "Attribute %s does not exist", attr); return NULL; } } send_update_msg_to_cluster(request, xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { /* Save the original xml node pointer so it can be restored after iterating * over all the children. */ xmlNode *orig_xml = request->xml; /* Second, if they do not support that protocol version, split it * up into individual messages and call attrd_client_update on * each one. */ pcmk__xe_foreach_child(xml, PCMK_XE_OP, send_child_update, request); request->xml = orig_xml; } return NULL; } attr = pcmk__xe_get(xml, PCMK__XA_ATTR_NAME); value = pcmk__xe_get(xml, PCMK__XA_ATTR_VALUE); regex = pcmk__xe_get(xml, PCMK__XA_ATTR_REGEX); if (handle_regexes(request) != pcmk_rc_ok) { /* Error handling was already dealt with in handle_regexes, so just return. 
*/ return NULL; } else if (regex) { /* Recursively call attrd_client_update on the new message with regexes * expanded. If supported by the attribute daemon, this means that all * matches can also be handled atomically. */ return attrd_client_update(request); } handle_missing_host(xml); if (handle_value_expansion(&value, xml, request->op, attr) == EINVAL) { pcmk__format_result(&request->result, CRM_EX_NOSUCH, PCMK_EXEC_ERROR, "Attribute %s does not exist", attr); return NULL; } crm_debug("Broadcasting %s[%s]=%s%s", attr, pcmk__xe_get(xml, PCMK__XA_ATTR_HOST), value, (attrd_election_won()? " (writer)" : "")); send_update_msg_to_cluster(request, xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } /*! * \internal * \brief Accept a new client IPC connection * * \param[in,out] c New connection * \param[in] uid Client user id * \param[in] gid Client group id * * \return pcmk_ok on success, -errno otherwise */ static int32_t attrd_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("New client connection %p", c); if (attrd_shutting_down(false)) { crm_info("Ignoring new connection from pid %d during shutdown", pcmk__client_pid(c)); return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return pcmk_ok; } /*! * \internal * \brief Destroy a client IPC connection * * \param[in] c Connection to destroy * * \return FALSE (i.e. do not re-run this callback) */ static int32_t attrd_ipc_closed(qb_ipcs_connection_t *c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { crm_trace("Ignoring request to clean up unknown connection %p", c); } else { crm_trace("Cleaning up closed client connection %p", c); /* Remove the client from the sync point waitlist if it's present. */ attrd_remove_client_from_waitlist(client); /* And no longer wait for confirmations from any peers. */ attrd_do_not_wait_for_client(client); pcmk__free_client(client); } return FALSE; } /*! * \internal * \brief Destroy a client IPC connection * * \param[in,out] c Connection to destroy * * \note We handle a destroyed connection the same as a closed one, * but we need a separate handler because the return type is different. 
*/ static void attrd_ipc_destroy(qb_ipcs_connection_t *c) { crm_trace("Destroying client connection %p", c); attrd_ipc_closed(c); } static int32_t attrd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; pcmk__client_t *client = pcmk__find_client(c); xmlNode *xml = NULL; // Sanity-check, and parse XML from IPC data CRM_CHECK((c != NULL) && (client != NULL), return 0); if (data == NULL) { crm_debug("No IPC data from PID %d", pcmk__client_pid(c)); return 0; } xml = pcmk__client_data2xml(client, data, &id, &flags); if (xml == NULL) { crm_debug("Unrecognizable IPC data from PID %d", pcmk__client_pid(c)); pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } else { pcmk__request_t request = { .ipc_client = client, .ipc_id = id, .ipc_flags = flags, .peer = NULL, .xml = xml, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; pcmk__assert(client->user != NULL); pcmk__update_acl_user(xml, PCMK__XA_ATTR_USER, client->user); - request.op = crm_element_value_copy(request.xml, PCMK_XA_TASK); + request.op = pcmk__xe_get_copy(request.xml, PCMK_XA_TASK); CRM_CHECK(request.op != NULL, return 0); attrd_handle_request(&request); pcmk__reset_request(&request); } pcmk__xml_free(xml); return 0; } static struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = attrd_ipc_accept, .connection_created = NULL, .msg_process = attrd_ipc_dispatch, .connection_closed = attrd_ipc_closed, .connection_destroyed = attrd_ipc_destroy }; void attrd_ipc_fini(void) { if (ipcs != NULL) { pcmk__drop_all_clients(ipcs); qb_ipcs_destroy(ipcs); ipcs = NULL; } attrd_unregister_handlers(); pcmk__client_cleanup(); } /*! * \internal * \brief Set up attrd IPC communication */ void attrd_init_ipc(void) { pcmk__serve_attrd_ipc(&ipcs, &ipc_callbacks); } diff --git a/daemons/based/based_remote.c b/daemons/based/based_remote.c index 386acbf671..0747fd7b9a 100644 --- a/daemons/based/based_remote.c +++ b/daemons/based/based_remote.c @@ -1,661 +1,661 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include // PRIx64 #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pacemaker-based.h" #include #include #include #if HAVE_SECURITY_PAM_APPL_H # include # define HAVE_PAM 1 #elif HAVE_PAM_PAM_APPL_H # include # define HAVE_PAM 1 #endif static pcmk__tls_t *tls = NULL; extern int remote_tls_fd; extern gboolean cib_shutdown_flag; int init_remote_listener(int port, gboolean encrypted); void cib_remote_connection_destroy(gpointer user_data); // @TODO This is rather short for someone to type their password #define REMOTE_AUTH_TIMEOUT 10000 int num_clients; static bool authenticate_user(const char *user, const char *passwd); static int cib_remote_listen(gpointer data); static int cib_remote_msg(gpointer data); static void remote_connection_destroy(gpointer user_data) { crm_info("No longer listening for remote connections"); return; } int init_remote_listener(int port, gboolean encrypted) { int rc; int *ssock = NULL; struct sockaddr_in saddr; int optval; static struct mainloop_fd_callbacks remote_listen_fd_callbacks = { .dispatch = cib_remote_listen, .destroy = remote_connection_destroy, }; if (port <= 0) { /* don't start it */ return 0; } if (encrypted) { bool use_cert = pcmk__x509_enabled(); crm_notice("Starting TLS listener on port %d", port); rc = pcmk__init_tls(&tls, true, use_cert ? GNUTLS_CRD_CERTIFICATE : GNUTLS_CRD_ANON); if (rc != pcmk_rc_ok) { return -1; } } else { crm_warn("Starting plain-text listener on port %d", port); } #ifndef HAVE_PAM crm_warn("This build does not support remote administrators " "because PAM support is not available"); #endif /* create server socket */ ssock = pcmk__assert_alloc(1, sizeof(int)); *ssock = socket(AF_INET, SOCK_STREAM, 0); if (*ssock == -1) { crm_err("Listener socket creation failed: %s", pcmk_rc_str(errno)); free(ssock); return -1; } /* reuse address */ optval = 1; rc = setsockopt(*ssock, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { crm_err("Local address reuse not allowed on listener socket: %s", pcmk_rc_str(errno)); } /* bind server socket */ memset(&saddr, '\0', sizeof(saddr)); saddr.sin_family = AF_INET; saddr.sin_addr.s_addr = INADDR_ANY; saddr.sin_port = htons(port); if (bind(*ssock, (struct sockaddr *)&saddr, sizeof(saddr)) == -1) { crm_err("Cannot bind to listener socket: %s", pcmk_rc_str(errno)); close(*ssock); free(ssock); return -2; } if (listen(*ssock, 10) == -1) { crm_err("Cannot listen on socket: %s", pcmk_rc_str(errno)); close(*ssock); free(ssock); return -3; } mainloop_add_fd("cib-remote", G_PRIORITY_DEFAULT, *ssock, ssock, &remote_listen_fd_callbacks); crm_debug("Started listener on port %d", port); return *ssock; } static int check_group_membership(const char *usr, const char *grp) { int index = 0; struct passwd *pwd = NULL; struct group *group = NULL; pwd = getpwnam(usr); if (pwd == NULL) { crm_notice("Rejecting remote client: '%s' is not a valid user", usr); return FALSE; } group = getgrgid(pwd->pw_gid); if (group != NULL && pcmk__str_eq(grp, group->gr_name, pcmk__str_none)) { return TRUE; } group = getgrnam(grp); if (group == NULL) { crm_err("Rejecting remote client: '%s' is not a valid group", grp); return FALSE; } while (TRUE) { char *member = group->gr_mem[index++]; if (member == NULL) { break; } else if (pcmk__str_eq(usr, member, pcmk__str_none)) { return TRUE; } } crm_notice("Rejecting remote client: User '%s' is not a member of " "group '%s'", usr, grp); 
return FALSE; } static gboolean cib_remote_auth(xmlNode * login) { const char *user = NULL; const char *pass = NULL; const char *tmp = NULL; if (login == NULL) { return FALSE; } if (!pcmk__xe_is(login, PCMK__XE_CIB_COMMAND)) { crm_warn("Rejecting remote client: Unrecognizable message " "(element '%s' not '" PCMK__XE_CIB_COMMAND "')", login->name); crm_log_xml_debug(login, "bad"); return FALSE; } tmp = pcmk__xe_get(login, PCMK_XA_OP); if (!pcmk__str_eq(tmp, "authenticate", pcmk__str_casei)) { crm_warn("Rejecting remote client: Unrecognizable message " "(operation '%s' not 'authenticate')", tmp); crm_log_xml_debug(login, "bad"); return FALSE; } user = pcmk__xe_get(login, PCMK_XA_USER); pass = pcmk__xe_get(login, PCMK__XA_PASSWORD); if (!user || !pass) { crm_warn("Rejecting remote client: No %s given", ((user == NULL)? "username" : "password")); crm_log_xml_debug(login, "bad"); return FALSE; } crm_log_xml_debug(login, "auth"); return check_group_membership(user, CRM_DAEMON_GROUP) && authenticate_user(user, pass); } static gboolean remote_auth_timeout_cb(gpointer data) { pcmk__client_t *client = data; client->remote->auth_timeout = 0; if (pcmk_is_set(client->flags, pcmk__client_authenticated)) { return FALSE; } mainloop_del_fd(client->remote->source); crm_err("Remote client authentication timed out"); return FALSE; } static int cib_remote_listen(gpointer data) { int csock = -1; unsigned laddr; struct sockaddr_storage addr; char ipstr[INET6_ADDRSTRLEN]; int ssock = *(int *)data; int rc; pcmk__client_t *new_client = NULL; static struct mainloop_fd_callbacks remote_client_fd_callbacks = { .dispatch = cib_remote_msg, .destroy = cib_remote_connection_destroy, }; /* accept the connection */ laddr = sizeof(addr); memset(&addr, 0, sizeof(addr)); csock = accept(ssock, (struct sockaddr *)&addr, &laddr); if (csock == -1) { crm_warn("Could not accept remote connection: %s", pcmk_rc_str(errno)); return TRUE; } pcmk__sockaddr2str(&addr, ipstr); rc = pcmk__set_nonblocking(csock); if (rc != pcmk_rc_ok) { crm_warn("Dropping remote connection from %s because " "it could not be set to non-blocking: %s", ipstr, pcmk_rc_str(rc)); close(csock); return TRUE; } num_clients++; new_client = pcmk__new_unauth_client(NULL); new_client->remote = pcmk__assert_alloc(1, sizeof(pcmk__remote_t)); if (ssock == remote_tls_fd) { pcmk__set_client_flags(new_client, pcmk__client_tls); /* create gnutls session for the server socket */ new_client->remote->tls_session = pcmk__new_tls_session(tls, csock); if (new_client->remote->tls_session == NULL) { close(csock); return TRUE; } } else { pcmk__set_client_flags(new_client, pcmk__client_tcp); new_client->remote->tcp_socket = csock; } // Require the client to authenticate within this time new_client->remote->auth_timeout = pcmk__create_timer(REMOTE_AUTH_TIMEOUT, remote_auth_timeout_cb, new_client); crm_info("%s connection from %s pending authentication for client %s", ((ssock == remote_tls_fd)? 
"Encrypted" : "Clear-text"), ipstr, new_client->id); new_client->remote->source = mainloop_add_fd("cib-remote-client", G_PRIORITY_DEFAULT, csock, new_client, &remote_client_fd_callbacks); return TRUE; } void cib_remote_connection_destroy(gpointer user_data) { pcmk__client_t *client = user_data; int csock = -1; if (client == NULL) { return; } crm_trace("Cleaning up after client %s disconnect", pcmk__client_name(client)); num_clients--; crm_trace("Num unfree'd clients: %d", num_clients); switch (PCMK__CLIENT_TYPE(client)) { case pcmk__client_tcp: csock = client->remote->tcp_socket; break; case pcmk__client_tls: if (client->remote->tls_session) { csock = pcmk__tls_get_client_sock(client->remote); if (pcmk_is_set(client->flags, pcmk__client_tls_handshake_complete)) { gnutls_bye(client->remote->tls_session, GNUTLS_SHUT_WR); } gnutls_deinit(client->remote->tls_session); client->remote->tls_session = NULL; } break; default: crm_warn("Unknown transport for client %s " QB_XS " flags=%#016" PRIx64, pcmk__client_name(client), client->flags); } if (csock >= 0) { close(csock); } pcmk__free_client(client); crm_trace("Freed the cib client"); if (cib_shutdown_flag) { cib_shutdown(0); } return; } static void cib_handle_remote_msg(pcmk__client_t *client, xmlNode *command) { if (!pcmk__xe_is(command, PCMK__XE_CIB_COMMAND)) { crm_log_xml_trace(command, "bad"); return; } if (client->name == NULL) { client->name = pcmk__str_copy(client->id); } /* unset dangerous options */ pcmk__xe_remove_attr(command, PCMK__XA_SRC); pcmk__xe_remove_attr(command, PCMK__XA_CIB_HOST); pcmk__xe_remove_attr(command, PCMK__XA_CIB_UPDATE); crm_xml_add(command, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(command, PCMK__XA_CIB_CLIENTID, client->id); crm_xml_add(command, PCMK__XA_CIB_CLIENTNAME, client->name); crm_xml_add(command, PCMK__XA_CIB_USER, client->user); if (pcmk__xe_get(command, PCMK__XA_CIB_CALLID) == NULL) { char *call_uuid = crm_generate_uuid(); /* fix the command */ crm_xml_add(command, PCMK__XA_CIB_CALLID, call_uuid); free(call_uuid); } if (pcmk__xe_get(command, PCMK__XA_CIB_CALLOPT) == NULL) { crm_xml_add_int(command, PCMK__XA_CIB_CALLOPT, 0); } crm_log_xml_trace(command, "Remote command: "); cib_common_callback_worker(0, 0, command, client, TRUE); } static int cib_remote_msg(gpointer data) { xmlNode *command = NULL; pcmk__client_t *client = data; int rc; const char *client_name = pcmk__client_name(client); crm_trace("Remote %s message received for client %s", pcmk__client_type_str(PCMK__CLIENT_TYPE(client)), client_name); if ((PCMK__CLIENT_TYPE(client) == pcmk__client_tls) && !pcmk_is_set(client->flags, pcmk__client_tls_handshake_complete)) { int rc = pcmk__read_handshake_data(client); if (rc == EAGAIN) { /* No more data is available at the moment. Just return for now; * we'll get invoked again once the client sends more. */ return 0; } else if (rc != pcmk_rc_ok) { return -1; } crm_debug("Completed TLS handshake with remote client %s", client_name); pcmk__set_client_flags(client, pcmk__client_tls_handshake_complete); if (client->remote->auth_timeout) { g_source_remove(client->remote->auth_timeout); } /* Now that the handshake is done, see if any client TLS certificate is * close to its expiration date and log if so. If a TLS certificate is not * in use, this function will just return so we don't need to check for the * session type here. 
*/ pcmk__tls_check_cert_expiration(client->remote->tls_session); // Require the client to authenticate within this time client->remote->auth_timeout = pcmk__create_timer(REMOTE_AUTH_TIMEOUT, remote_auth_timeout_cb, client); return 0; } rc = pcmk__read_available_remote_data(client->remote); switch (rc) { case pcmk_rc_ok: break; case EAGAIN: /* We haven't read the whole message yet */ return 0; default: /* Error */ crm_trace("Error reading from remote client: %s", pcmk_rc_str(rc)); return -1; } /* must pass auth before we will process anything else */ if (!pcmk_is_set(client->flags, pcmk__client_authenticated)) { xmlNode *reg; const char *user = NULL; command = pcmk__remote_message_xml(client->remote); if (cib_remote_auth(command) == FALSE) { pcmk__xml_free(command); return -1; } pcmk__set_client_flags(client, pcmk__client_authenticated); g_source_remove(client->remote->auth_timeout); client->remote->auth_timeout = 0; - client->name = crm_element_value_copy(command, PCMK_XA_NAME); + client->name = pcmk__xe_get_copy(command, PCMK_XA_NAME); user = pcmk__xe_get(command, PCMK_XA_USER); if (user) { client->user = pcmk__str_copy(user); } crm_notice("Remote connection accepted for authenticated user %s " QB_XS " client %s", pcmk__s(user, ""), client_name); /* send ACK */ reg = pcmk__xe_create(NULL, PCMK__XE_CIB_RESULT); crm_xml_add(reg, PCMK__XA_CIB_OP, CRM_OP_REGISTER); crm_xml_add(reg, PCMK__XA_CIB_CLIENTID, client->id); pcmk__remote_send_xml(client->remote, reg); pcmk__xml_free(reg); pcmk__xml_free(command); } command = pcmk__remote_message_xml(client->remote); if (command != NULL) { crm_trace("Remote message received from client %s", client_name); cib_handle_remote_msg(client, command); pcmk__xml_free(command); } return 0; } #ifdef HAVE_PAM /*! * \internal * \brief Pass remote user's password to PAM * * \param[in] num_msg Number of entries in \p msg * \param[in] msg Array of PAM messages * \param[out] response Where to set response to PAM * \param[in] data User data (the password string) * * \return PAM return code (PAM_BUF_ERR for memory errors, PAM_CONV_ERR for all * other errors, or PAM_SUCCESS on success) * \note See pam_conv(3) for more explanation */ static int construct_pam_passwd(int num_msg, const struct pam_message **msg, struct pam_response **response, void *data) { /* In theory, multiple messages are allowed, but due to OS compatibility * issues, PAM implementations are recommended to only send one message at a * time. We can require that here for simplicity. */ CRM_CHECK((num_msg == 1) && (msg != NULL) && (response != NULL) && (data != NULL), return PAM_CONV_ERR); switch (msg[0]->msg_style) { case PAM_PROMPT_ECHO_OFF: case PAM_PROMPT_ECHO_ON: // Password requested break; case PAM_TEXT_INFO: crm_info("PAM: %s", msg[0]->msg); data = NULL; break; case PAM_ERROR_MSG: /* In theory we should show msg[0]->msg, but that might * contain the password, which we don't want in the logs */ crm_err("PAM reported an error"); data = NULL; break; default: crm_warn("Ignoring PAM message of unrecognized type %d", msg[0]->msg_style); return PAM_CONV_ERR; } *response = calloc(1, sizeof(struct pam_response)); if (*response == NULL) { return PAM_BUF_ERR; } (*response)->resp_retcode = 0; (*response)->resp = pcmk__str_copy((const char *) data); // Caller will free return PAM_SUCCESS; } #endif /*! 
* \internal * \brief Verify the username and password passed for a remote CIB connection * * \param[in] user Username passed for remote CIB connection * \param[in] passwd Password passed for remote CIB connection * * \return \c true if the username and password are accepted, otherwise \c false * \note This function rejects all credentials when built without PAM support. */ static bool authenticate_user(const char *user, const char *passwd) { #ifdef HAVE_PAM int rc = 0; bool pass = false; const void *p_user = NULL; struct pam_conv p_conv; struct pam_handle *pam_h = NULL; static const char *pam_name = NULL; if (pam_name == NULL) { pam_name = getenv("CIB_pam_service"); if (pam_name == NULL) { pam_name = "login"; } } p_conv.conv = construct_pam_passwd; p_conv.appdata_ptr = (void *) passwd; rc = pam_start(pam_name, user, &p_conv, &pam_h); if (rc != PAM_SUCCESS) { crm_warn("Rejecting remote client for user %s " "because PAM initialization failed: %s", user, pam_strerror(pam_h, rc)); goto bail; } // Check user credentials rc = pam_authenticate(pam_h, PAM_SILENT); if (rc != PAM_SUCCESS) { crm_notice("Access for remote user %s denied: %s", user, pam_strerror(pam_h, rc)); goto bail; } /* Get the authenticated user name (PAM modules can map the original name to * something else). Since the CIB manager runs as the daemon user (not * root), that is the only user that can be successfully authenticated. */ rc = pam_get_item(pam_h, PAM_USER, &p_user); if (rc != PAM_SUCCESS) { crm_warn("Rejecting remote client for user %s " "because PAM failed to return final user name: %s", user, pam_strerror(pam_h, rc)); goto bail; } if (p_user == NULL) { crm_warn("Rejecting remote client for user %s " "because PAM returned no final user name", user); goto bail; } // @TODO Why do we require these to match? if (!pcmk__str_eq(p_user, user, pcmk__str_none)) { crm_warn("Rejecting remote client for user %s " "because PAM returned different final user name %s", user, p_user); goto bail; } // Check user account restrictions (expiration, etc.) rc = pam_acct_mgmt(pam_h, PAM_SILENT); if (rc != PAM_SUCCESS) { crm_notice("Access for remote user %s denied: %s", user, pam_strerror(pam_h, rc)); goto bail; } pass = true; bail: pam_end(pam_h, rc); return pass; #else // @TODO Implement for non-PAM environments crm_warn("Rejecting remote user %s because this build does not have " "PAM support", user); return false; #endif } diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c index 99875b0a47..988a2afa89 100644 --- a/daemons/controld/controld_execd.c +++ b/daemons/controld/controld_execd.c @@ -1,2401 +1,2401 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include // lrmd_event_data_t, lrmd_rsc_info_t, etc. 
#include #include #include #include #include #define START_DELAY_THRESHOLD 5 * 60 * 1000 #define MAX_LRM_REG_FAILS 30 struct delete_event_s { int rc; const char *rsc; lrm_state_t *lrm_state; }; static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id); static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list); static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data); static lrmd_event_data_t *construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, const char *rsc_id, const char *operation); static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, struct ra_metadata_s *md); static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level); static void lrm_connection_destroy(void) { if (pcmk_is_set(controld_globals.fsa_input_register, R_LRM_CONNECTED)) { crm_crit("Lost connection to local executor"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); controld_clear_fsa_input_flags(R_LRM_CONNECTED); } } static char * make_stop_id(const char *rsc, int call_id) { return crm_strdup_printf("%s:%d", rsc, call_id); } static void copy_instance_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") == NULL) { pcmk__insert_dup(user_data, (const char *) key, (const char *) value); } } static void copy_meta_keys(gpointer key, gpointer value, gpointer user_data) { if (strstr(key, CRM_META "_") != NULL) { pcmk__insert_dup(user_data, (const char *) key, (const char *) value); } } /*! * \internal * \brief Remove a recurring operation from a resource's history * * \param[in,out] history Resource history to modify * \param[in] op Operation to remove * * \return TRUE if the operation was found and removed, FALSE otherwise */ static gboolean history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_event_data_t *existing = iter->data; if ((op->interval_ms == existing->interval_ms) && pcmk__str_eq(op->rsc_id, existing->rsc_id, pcmk__str_none) && pcmk__str_eq(op->op_type, existing->op_type, pcmk__str_casei)) { history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter); lrmd_free_event(existing); return TRUE; } } return FALSE; } /*! * \internal * \brief Free all recurring operations in resource history * * \param[in,out] history Resource history to modify */ static void history_free_recurring_ops(rsc_history_t *history) { GList *iter; for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { lrmd_free_event(iter->data); } g_list_free(history->recurring_op_list); history->recurring_op_list = NULL; } /*! 
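 * \note The per-element free plus g_list_free() in history_free_recurring_ops()
 *       above could equally be written with GLib's one-call form (shown only
 *       for illustration; the NULL reset would still be needed):
 *
 *           g_list_free_full(history->recurring_op_list,
 *                            (GDestroyNotify) lrmd_free_event);
 *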
* \internal * \brief Free resource history * * \param[in,out] history Resource history to free */ void history_free(gpointer data) { rsc_history_t *history = (rsc_history_t*)data; if (history->stop_params) { g_hash_table_destroy(history->stop_params); } /* Don't need to free history->rsc.id because it's set to history->id */ free(history->rsc.type); free(history->rsc.standard); free(history->rsc.provider); lrmd_free_event(history->failed); lrmd_free_event(history->last); free(history->id); history_free_recurring_ops(history); free(history); } static void update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) { int target_rc = 0; rsc_history_t *entry = NULL; if (op->rsc_deleted) { crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type); controld_delete_resource_history(op->rsc_id, lrm_state->node_name, NULL, crmd_cib_smart_opt()); return; } if (pcmk__str_eq(op->op_type, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { return; } crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type); entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id); if (entry == NULL && rsc) { entry = pcmk__assert_alloc(1, sizeof(rsc_history_t)); entry->id = pcmk__str_copy(op->rsc_id); g_hash_table_insert(lrm_state->resource_history, entry->id, entry); entry->rsc.id = entry->id; entry->rsc.type = pcmk__str_copy(rsc->type); entry->rsc.standard = pcmk__str_copy(rsc->standard); entry->rsc.provider = pcmk__str_copy(rsc->provider); } else if (entry == NULL) { crm_info("Resource %s no longer exists, not updating cache", op->rsc_id); return; } entry->last_callid = op->call_id; target_rc = rsc_op_expected_rc(op); if (op->op_status == PCMK_EXEC_CANCELLED) { if (op->interval_ms > 0) { crm_trace("Removing cancelled recurring op: " PCMK__OP_FMT, op->rsc_id, op->op_type, op->interval_ms); history_remove_recurring_op(entry, op); return; } else { crm_trace("Skipping " PCMK__OP_FMT " rc=%d, status=%d", op->rsc_id, op->op_type, op->interval_ms, op->rc, op->op_status); } } else if (did_rsc_op_fail(op, target_rc)) { /* Store failed monitors here, otherwise the block below will cause them * to be forgotten when a stop happens. */ if (entry->failed) { lrmd_free_event(entry->failed); } entry->failed = lrmd_copy_event(op); } else if (op->interval_ms == 0) { if (entry->last) { lrmd_free_event(entry->last); } entry->last = lrmd_copy_event(op); if (op->params && pcmk__strcase_any_of(op->op_type, PCMK_ACTION_START, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, PCMK_ACTION_MONITOR, NULL)) { if (entry->stop_params) { g_hash_table_destroy(entry->stop_params); } entry->stop_params = pcmk__strkey_table(free, free); g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params); } } if (op->interval_ms > 0) { /* Ensure there are no duplicates */ history_remove_recurring_op(entry, op); crm_trace("Adding recurring op: " PCMK__OP_FMT, op->rsc_id, op->op_type, op->interval_ms); entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); } else if ((entry->recurring_op_list != NULL) && !pcmk__str_eq(op->op_type, PCMK_ACTION_MONITOR, pcmk__str_casei)) { crm_trace("Dropping %d recurring ops because of: " PCMK__OP_FMT, g_list_length(entry->recurring_op_list), op->rsc_id, op->op_type, op->interval_ms); history_free_recurring_ops(entry); } } /*! 
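 * (The helper documented here builds a synthetic lrmd event with
 * construct_op(), marks it PCMK_OCF_OK / PCMK_EXEC_DONE via lrmd__set_result(),
 * and relays it with controld_ack_event_directly().)
 *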
* \internal * \brief Send a direct OK ack for a resource task * * \param[in] lrm_state LRM connection * \param[in] input Input message being ack'ed * \param[in] rsc_id ID of affected resource * \param[in] rsc Affected resource (if available) * \param[in] task Operation task being ack'ed * \param[in] ack_host Name of host to send ack to * \param[in] ack_sys IPC system name to ack */ static void send_task_ok_ack(const lrm_state_t *lrm_state, const ha_msg_input_t *input, const char *rsc_id, const lrmd_rsc_info_t *rsc, const char *task, const char *ack_host, const char *ack_sys) { lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task); lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); controld_ack_event_directly(ack_host, ack_sys, rsc, op, rsc_id); lrmd_free_event(op); } static inline const char * op_node_name(lrmd_event_data_t *op) { return pcmk__s(op->remote_nodename, controld_globals.cluster->priv->node_name); } void lrm_op_callback(lrmd_event_data_t * op) { CRM_CHECK(op != NULL, return); switch (op->type) { case lrmd_event_disconnect: if (op->remote_nodename == NULL) { /* If this is the local executor IPC connection, set the right * bits in the controller when the connection goes down. */ lrm_connection_destroy(); } break; case lrmd_event_exec_complete: { lrm_state_t *lrm_state = controld_get_executor_state(op_node_name(op), false); pcmk__assert(lrm_state != NULL); process_lrm_event(lrm_state, op, NULL, NULL); } break; default: break; } } static void try_local_executor_connect(long long action, fsa_data_t *msg_data, lrm_state_t *lrm_state) { int rc = pcmk_rc_ok; crm_debug("Connecting to the local executor"); // If we can connect, great rc = controld_connect_local_executor(lrm_state); if (rc == pcmk_rc_ok) { controld_set_fsa_input_flags(R_LRM_CONNECTED); crm_info("Connection to the local executor established"); return; } // Otherwise, if we can try again, set a timer to do so if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) { crm_warn("Failed to connect to the local executor %d time%s " "(%d max): %s", lrm_state->num_lrm_register_fails, pcmk__plural_s(lrm_state->num_lrm_register_fails), MAX_LRM_REG_FAILS, pcmk_rc_str(rc)); controld_start_wait_timer(); crmd_fsa_stall(FALSE); return; } // Otherwise give up crm_err("Failed to connect to the executor the max allowed " "%d time%s: %s", lrm_state->num_lrm_register_fails, pcmk__plural_s(lrm_state->num_lrm_register_fails), pcmk_rc_str(rc)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } /* A_LRM_CONNECT */ void do_lrm_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* This only pertains to local executor connections. Remote connections are * handled as resources within the scheduler. Connecting and disconnecting * from remote executor instances is handled differently. 
*/ lrm_state_t *lrm_state = NULL; if (controld_globals.cluster->priv->node_name == NULL) { return; // Shouldn't be possible } lrm_state = controld_get_executor_state(NULL, true); if (lrm_state == NULL) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } if (action & A_LRM_DISCONNECT) { if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) { if (action == A_LRM_DISCONNECT) { crmd_fsa_stall(FALSE); return; } } controld_clear_fsa_input_flags(R_LRM_CONNECTED); lrm_state_disconnect(lrm_state); lrm_state_reset_tables(lrm_state, FALSE); } if (action & A_LRM_CONNECT) { try_local_executor_connect(action, msg_data, lrm_state); } if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __func__); } } static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level) { int counter = 0; gboolean rc = TRUE; const char *when = "lrm disconnect"; GHashTableIter gIter; const char *key = NULL; rsc_history_t *entry = NULL; active_op_t *pending = NULL; crm_debug("Checking for active resources before exit"); if (cur_state == S_TERMINATE) { log_level = LOG_ERR; when = "shutdown"; } else if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { when = "shutdown... waiting"; } if ((lrm_state->active_ops != NULL) && lrm_state_is_connected(lrm_state)) { guint removed = g_hash_table_foreach_remove(lrm_state->active_ops, stop_recurring_actions, lrm_state); guint nremaining = g_hash_table_size(lrm_state->active_ops); if (removed || nremaining) { crm_notice("Stopped %u recurring operation%s at %s (%u remaining)", removed, pcmk__plural_s(removed), when, nremaining); } } if (lrm_state->active_ops != NULL) { g_hash_table_iter_init(&gIter, lrm_state->active_ops); while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) { /* Ignore recurring actions in the shutdown calculations */ if (pending->interval_ms == 0) { counter++; } } } if (counter > 0) { do_crm_log(log_level, "%d pending executor operation%s at %s", counter, pcmk__plural_s(counter), when); if ((cur_state == S_TERMINATE) || !pcmk_is_set(controld_globals.fsa_input_register, R_SENT_RSC_STOP)) { g_hash_table_iter_init(&gIter, lrm_state->active_ops); while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) { do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key); } } else { rc = FALSE; } return rc; } if (lrm_state->resource_history == NULL) { return rc; } if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { /* At this point we're not waiting, we're just shutting down */ when = "shutdown"; } counter = 0; g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) { if (is_rsc_active(lrm_state, entry->id) == FALSE) { continue; } counter++; if (log_level == LOG_ERR) { crm_info("Found %s active at %s", entry->id, when); } else { crm_trace("Found %s active at %s", entry->id, when); } if (lrm_state->active_ops != NULL) { GHashTableIter hIter; g_hash_table_iter_init(&hIter, lrm_state->active_ops); while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) { if (pcmk__str_eq(entry->id, pending->rsc_id, pcmk__str_none)) { crm_notice("%sction %s (%s) incomplete at %s", pending->interval_ms == 0 ? "A" : "Recurring a", key, pending->op_key, when); } } } } if (counter) { crm_err("%d resource%s active at %s", counter, (counter == 1)? 
" was" : "s were", when); } return rc; } static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id) { rsc_history_t *entry = NULL; entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->last == NULL) { return FALSE; } crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type, entry->last->interval_ms, entry->last->rc); if ((entry->last->rc == PCMK_OCF_OK) && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_STOP, pcmk__str_casei)) { return FALSE; } else if (entry->last->rc == PCMK_OCF_OK && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_MIGRATE_TO, pcmk__str_casei)) { // A stricter check is too complex ... leave that to the scheduler return FALSE; } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) { return FALSE; } else if ((entry->last->interval_ms == 0) && (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) { /* Badly configured resources can't be reliably stopped */ return FALSE; } return TRUE; } static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list) { GHashTableIter iter; rsc_history_t *entry = NULL; g_hash_table_iter_init(&iter, lrm_state->resource_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) { GList *gIter = NULL; xmlNode *xml_rsc = pcmk__xe_create(rsc_list, PCMK__XE_LRM_RESOURCE); crm_xml_add(xml_rsc, PCMK_XA_ID, entry->id); crm_xml_add(xml_rsc, PCMK_XA_TYPE, entry->rsc.type); crm_xml_add(xml_rsc, PCMK_XA_CLASS, entry->rsc.standard); crm_xml_add(xml_rsc, PCMK_XA_PROVIDER, entry->rsc.provider); if (entry->last && entry->last->params) { static const char *name = CRM_META "_" PCMK__META_CONTAINER; const char *container = g_hash_table_lookup(entry->last->params, name); if (container) { crm_trace("Resource %s is a part of container resource %s", entry->id, container); crm_xml_add(xml_rsc, PCMK__META_CONTAINER, container); } } controld_add_resource_history_xml(xml_rsc, &(entry->rsc), entry->failed, lrm_state->node_name); controld_add_resource_history_xml(xml_rsc, &(entry->rsc), entry->last, lrm_state->node_name); for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) { controld_add_resource_history_xml(xml_rsc, &(entry->rsc), gIter->data, lrm_state->node_name); } } return FALSE; } xmlNode * controld_query_executor_state(void) { // @TODO Ensure all callers handle NULL returns xmlNode *xml_state = NULL; xmlNode *xml_data = NULL; xmlNode *rsc_list = NULL; pcmk__node_status_t *peer = NULL; lrm_state_t *lrm_state = controld_get_executor_state(NULL, false); if (!lrm_state) { crm_err("Could not get executor state for local node"); return NULL; } peer = pcmk__get_node(0, lrm_state->node_name, NULL, pcmk__node_search_any); CRM_CHECK(peer != NULL, return NULL); xml_state = create_node_state_update(peer, node_update_cluster|node_update_peer, NULL, __func__); if (xml_state == NULL) { return NULL; } xml_data = pcmk__xe_create(xml_state, PCMK__XE_LRM); crm_xml_add(xml_data, PCMK_XA_ID, peer->xml_id); rsc_list = pcmk__xe_create(xml_data, PCMK__XE_LRM_RESOURCES); // Build a list of active (not necessarily running) resources build_active_RAs(lrm_state, rsc_list); crm_log_xml_trace(xml_state, "Current executor state"); return xml_state; } /*! 
* \internal * \brief Map standard Pacemaker return code to operation status and OCF code * * \param[out] event Executor event whose status and return code should be set * \param[in] rc Standard Pacemaker return code */ void controld_rc2event(lrmd_event_data_t *event, int rc) { /* This is called for cleanup requests from controller peers/clients, not * for resource actions, so no exit reason is needed. */ switch (rc) { case pcmk_rc_ok: lrmd__set_result(event, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL); break; case EACCES: lrmd__set_result(event, PCMK_OCF_INSUFFICIENT_PRIV, PCMK_EXEC_ERROR, NULL); break; default: lrmd__set_result(event, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, NULL); break; } } /*! * \internal * \brief Trigger a new transition after CIB status was deleted * * If a CIB status delete was not expected (as part of the transition graph), * trigger a new transition by updating the (arbitrary) "last-lrm-refresh" * cluster property. * * \param[in] from_sys IPC name that requested the delete * \param[in] rsc_id Resource whose status was deleted (for logging only) */ void controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id) { if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_casei)) { char *now_s = crm_strdup_printf("%lld", (long long) time(NULL)); crm_debug("Triggering a refresh after %s cleaned %s", from_sys, rsc_id); cib__update_node_attr(controld_globals.logger_out, controld_globals.cib_conn, cib_none, PCMK_XE_CRM_CONFIG, NULL, NULL, NULL, NULL, "last-lrm-refresh", now_s, NULL, NULL); free(now_s); } } static void notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc) { lrmd_event_data_t *op = NULL; const char *from_sys = pcmk__xe_get(input->msg, PCMK__XA_CRM_SYS_FROM); const char *from_host = pcmk__xe_get(input->msg, PCMK__XA_SRC); crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "localhost"), rsc_id, ((rc == pcmk_ok)? 
"" : " not")); op = construct_op(lrm_state, input->xml, rsc_id, PCMK_ACTION_DELETE); controld_rc2event(op, pcmk_legacy2rc(rc)); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); controld_trigger_delete_refresh(from_sys, rsc_id); } static gboolean lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data) { struct delete_event_s *event = user_data; struct pending_deletion_op_s *op = value; if (pcmk__str_eq(event->rsc, op->rsc, pcmk__str_none)) { notify_deleted(event->lrm_state, op->input, event->rsc, event->rc); return TRUE; } return FALSE; } static gboolean lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data) { const char *rsc = user_data; active_op_t *pending = value; if (pcmk__str_eq(rsc, pending->rsc_id, pcmk__str_none)) { crm_info("Removing op %s:%d for deleted resource %s", pending->op_key, pending->call_id, rsc); return TRUE; } return FALSE; } static void delete_rsc_entry(lrm_state_t *lrm_state, ha_msg_input_t *input, const char *rsc_id, GHashTableIter *rsc_iter, int rc, const char *user_name, bool from_cib) { struct delete_event_s event; CRM_CHECK(rsc_id != NULL, return); if (rc == pcmk_ok) { char *rsc_id_copy = pcmk__str_copy(rsc_id); if (rsc_iter) { g_hash_table_iter_remove(rsc_iter); } else { g_hash_table_remove(lrm_state->resource_history, rsc_id_copy); } if (from_cib) { controld_delete_resource_history(rsc_id_copy, lrm_state->node_name, user_name, crmd_cib_smart_opt()); } g_hash_table_foreach_remove(lrm_state->active_ops, lrm_remove_deleted_op, rsc_id_copy); free(rsc_id_copy); } if (input) { notify_deleted(lrm_state, input, rsc_id, rc); } event.rc = rc; event.rsc = rsc_id; event.lrm_state = lrm_state; g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event); } static inline gboolean last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms) { if (entry == NULL) { return FALSE; } if (op == NULL) { return TRUE; } return (pcmk__str_eq(op, entry->failed->op_type, pcmk__str_casei) && (interval_ms == entry->failed->interval_ms)); } /*! * \internal * \brief Clear a resource's last failure * * Erase a resource's last failure on a particular node from both the * LRM resource history in the CIB, and the resource history remembered * for the LRM state. 
* * \param[in] rsc_id Resource name * \param[in] node_name Node name * \param[in] operation If specified, only clear if matching this operation * \param[in] interval_ms If operation is specified, it has this interval */ void lrm_clear_last_failure(const char *rsc_id, const char *node_name, const char *operation, guint interval_ms) { lrm_state_t *lrm_state = controld_get_executor_state(node_name, false); if (lrm_state == NULL) { return; } if (lrm_state->resource_history != NULL) { rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (last_failed_matches_op(entry, operation, interval_ms)) { lrmd_free_event(entry->failed); entry->failed = NULL; } } } /* Returns: gboolean - cancellation is in progress */ static gboolean cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove) { int rc = pcmk_ok; char *local_key = NULL; active_op_t *pending = NULL; CRM_CHECK(op != 0, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); if (key == NULL) { local_key = make_stop_id(rsc_id, op); key = local_key; } pending = g_hash_table_lookup(lrm_state->active_ops, key); if (pending) { if (remove && !pcmk_is_set(pending->flags, active_op_remove)) { controld_set_active_op_flags(pending, active_op_remove); crm_debug("Scheduling %s for removal", key); } if (pcmk_is_set(pending->flags, active_op_cancelled)) { crm_debug("Operation %s already cancelled", key); free(local_key); return FALSE; } controld_set_active_op_flags(pending, active_op_cancelled); } else { crm_info("No pending op found for %s", key); free(local_key); return FALSE; } crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key); rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms); if (rc == pcmk_ok) { crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key); free(local_key); return TRUE; } crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key); /* The caller needs to make sure the entry is * removed from the active operations list * * Usually by returning TRUE inside the worker function * supplied to g_hash_table_foreach_remove() * * Not removing the entry from active operations will block * the node from shutting down */ free(local_key); return FALSE; } struct cancel_data { gboolean done; gboolean remove; const char *key; lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean cancel_action_by_key(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct cancel_data *data = user_data; active_op_t *op = value; if (pcmk__str_eq(op->op_key, data->key, pcmk__str_none)) { data->done = TRUE; remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove); } return remove; } static gboolean cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove) { guint removed = 0; struct cancel_data data; CRM_CHECK(rsc != NULL, return FALSE); CRM_CHECK(key != NULL, return FALSE); data.key = key; data.rsc = rsc; data.done = FALSE; data.remove = remove; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove(lrm_state->active_ops, cancel_action_by_key, &data); crm_trace("Removed %u op cache entries, new size: %u", removed, g_hash_table_size(lrm_state->active_ops)); return data.done; } /*! 
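 * Usage sketch for the lookup helper documented below (illustrative only;
 * callers in this file follow the same pattern):
 *
 *     lrmd_rsc_info_t *info = NULL;
 *
 *     if (get_lrm_resource(lrm_state, rsc_xml, TRUE, &info) == pcmk_ok) {
 *         // ... use info ...
 *         lrmd_free_rsc_info(info);
 *     }
 *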
* \internal * \brief Retrieve resource information from LRM * * \param[in,out] lrm_state Executor connection state to use * \param[in] rsc_xml XML containing resource configuration * \param[in] do_create If true, register resource if not already * \param[out] rsc_info Where to store information obtained from executor * * \retval pcmk_ok Success (and rsc_info holds newly allocated result) * \retval -EINVAL Required information is missing from arguments * \retval -ENOTCONN No active connection to LRM * \retval -ENODEV Resource not found * \retval -errno Error communicating with executor when registering resource * * \note Caller is responsible for freeing result on success. */ static int get_lrm_resource(lrm_state_t *lrm_state, const xmlNode *rsc_xml, gboolean do_create, lrmd_rsc_info_t **rsc_info) { const char *id = pcmk__xe_id(rsc_xml); CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL); CRM_CHECK(id, return -EINVAL); if (lrm_state_is_connected(lrm_state) == FALSE) { return -ENOTCONN; } crm_trace("Retrieving resource information for %s from the executor", id); *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); // If resource isn't known by ID, try clone name, if provided if (!*rsc_info) { const char *long_id = pcmk__xe_get(rsc_xml, PCMK__XA_LONG_ID); if (long_id) { *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0); } } if ((*rsc_info == NULL) && do_create) { const char *class = pcmk__xe_get(rsc_xml, PCMK_XA_CLASS); const char *provider = pcmk__xe_get(rsc_xml, PCMK_XA_PROVIDER); const char *type = pcmk__xe_get(rsc_xml, PCMK_XA_TYPE); int rc; crm_trace("Registering resource %s with the executor", id); rc = lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring); if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Could not register resource %s with the executor on %s: %s " QB_XS " rc=%d", id, lrm_state->node_name, pcmk_strerror(rc), rc); /* Register this as an internal error if this involves the local * executor. Otherwise, we're likely dealing with an unresponsive * remote node, which is not an FSA failure. */ if (lrm_state_is_local(lrm_state) == TRUE) { register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } return rc; } *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0); } return *rsc_info? pcmk_ok : -ENODEV; } static void delete_resource(lrm_state_t *lrm_state, const char *id, lrmd_rsc_info_t *rsc, GHashTableIter *iter, const char *sys, const char *user, ha_msg_input_t *request, bool unregister, bool from_cib) { int rc = pcmk_ok; crm_info("Removing resource %s from executor for %s%s%s", id, sys, (user? " as " : ""), (user? user : "")); if (rsc && unregister) { rc = lrm_state_unregister_rsc(lrm_state, id, 0); } if (rc == pcmk_ok) { crm_trace("Resource %s deleted from executor", id); } else if (rc == -EINPROGRESS) { crm_info("Deletion of resource '%s' from executor is pending", id); if (request) { struct pending_deletion_op_s *op = NULL; - char *ref = crm_element_value_copy(request->msg, PCMK_XA_REFERENCE); + char *ref = pcmk__xe_get_copy(request->msg, PCMK_XA_REFERENCE); op = pcmk__assert_alloc(1, sizeof(struct pending_deletion_op_s)); op->rsc = pcmk__str_copy(rsc->id); op->input = copy_ha_msg_input(request); g_hash_table_insert(lrm_state->deletion_ops, ref, op); } return; } else { crm_warn("Could not delete '%s' from executor for %s%s%s: %s " QB_XS " rc=%d", id, sys, (user? " as " : ""), (user? 
user : ""), pcmk_strerror(rc), rc); } delete_rsc_entry(lrm_state, request, id, iter, rc, user, from_cib); } static int get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id) { int call_id = 999999999; rsc_history_t *entry = NULL; if(lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* Make sure the call id is greater than the last successful operation, * otherwise the failure will not result in a possible recovery of the resource * as it could appear the failure occurred before the successful start */ if (entry) { call_id = entry->last_callid + 1; } if (call_id < 0) { call_id = 1; } return call_id; } static void fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status, enum ocf_exitcode op_exitcode, const char *exit_reason) { op->call_id = get_fake_call_id(lrm_state, op->rsc_id); op->t_run = time(NULL); op->t_rcchange = op->t_run; lrmd__set_result(op, op_exitcode, op_status, exit_reason); } static void force_reprobe(lrm_state_t *lrm_state, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node, bool reprobe_all_nodes) { GHashTableIter gIter; rsc_history_t *entry = NULL; crm_info("Clearing resource history on node %s", lrm_state->node_name); g_hash_table_iter_init(&gIter, lrm_state->resource_history); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { /* only unregister the resource during a reprobe if it is not a remote connection * resource. otherwise unregistering the connection will terminate remote-node * membership */ bool unregister = true; if (is_remote_lrmd_ra(NULL, NULL, entry->id)) { unregister = false; if (reprobe_all_nodes) { lrm_state_t *remote_lrm_state = controld_get_executor_state(entry->id, false); if (remote_lrm_state != NULL) { /* If reprobing all nodes, be sure to reprobe the remote * node before clearing its connection resource */ force_reprobe(remote_lrm_state, from_sys, from_host, user_name, TRUE, reprobe_all_nodes); } } } /* Don't delete from the CIB, since we'll delete the whole node's LRM * state from the CIB soon */ delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, user_name, NULL, unregister, false); } /* Now delete the copy in the CIB */ controld_delete_node_state(lrm_state->node_name, controld_section_lrm, cib_none); } /*! * \internal * \brief Fail a requested action without actually executing it * * For an action that can't be executed, process it similarly to an actual * execution result, with specified error status (except for notify actions, * which will always be treated as successful). * * \param[in,out] lrm_state Executor connection that action is for * \param[in] action Action XML from request * \param[in] rc Desired return code to use * \param[in] op_status Desired operation status to use * \param[in] exit_reason Human-friendly detail, if error */ static void synthesize_lrmd_failure(lrm_state_t *lrm_state, const xmlNode *action, int op_status, enum ocf_exitcode rc, const char *exit_reason) { lrmd_event_data_t *op = NULL; const char *operation = pcmk__xe_get(action, PCMK_XA_OPERATION); const char *target_node = pcmk__xe_get(action, PCMK__META_ON_NODE); xmlNode *xml_rsc = pcmk__xe_first_child(action, PCMK_XE_PRIMITIVE, NULL, NULL); if ((xml_rsc == NULL) || (pcmk__xe_id(xml_rsc) == NULL)) { /* @TODO Should we do something else, like direct ack? 
*/ crm_info("Can't fake %s failure (%d) on %s without resource configuration", pcmk__xe_get(action, PCMK__XA_OPERATION_KEY), rc, target_node); return; } else if(operation == NULL) { /* This probably came from crm_resource -C, nothing to do */ crm_info("Can't fake %s failure (%d) on %s without operation", pcmk__xe_id(xml_rsc), rc, target_node); return; } op = construct_op(lrm_state, action, pcmk__xe_id(xml_rsc), operation); if (pcmk__str_eq(operation, PCMK_ACTION_NOTIFY, pcmk__str_casei)) { // Notifications can't fail fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_OK, NULL); } else { fake_op_status(lrm_state, op, op_status, rc, exit_reason); } crm_info("Faking " PCMK__OP_FMT " result (%d) on %s", op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node); // Process the result as if it came from the LRM process_lrm_event(lrm_state, op, NULL, action); lrmd_free_event(op); } /*! * \internal * \brief Get target of an LRM operation (replacing \p NULL with local node * name) * * \param[in] xml LRM operation data XML * * \return LRM operation target node name (local node or Pacemaker Remote node) */ static const char * lrm_op_target(const xmlNode *xml) { const char *target = NULL; if (xml) { target = pcmk__xe_get(xml, PCMK__META_ON_NODE); } if (target == NULL) { target = controld_globals.cluster->priv->node_name; } return target; } static void fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name, const char *from_host, const char *from_sys) { lrmd_event_data_t *op = NULL; lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = pcmk__xe_first_child(xml, PCMK_XE_PRIMITIVE, NULL, NULL); CRM_CHECK(xml_rsc != NULL, return); /* The executor simply executes operations and reports the results, without * any concept of success or failure, so to fail a resource, we must fake * what a failure looks like. * * To do this, we create a fake executor operation event for the resource, * and pass that event to the executor client callback so it will be * processed as if it came from the executor. 
*/ op = construct_op(lrm_state, xml, pcmk__xe_id(xml_rsc), "asyncmon"); free((char*) op->user_data); op->user_data = NULL; op->interval_ms = 0; if (user_name && !pcmk__is_privileged(user_name)) { crm_err("%s does not have permission to fail %s", user_name, pcmk__xe_id(xml_rsc)); fake_op_status(lrm_state, op, PCMK_EXEC_ERROR, PCMK_OCF_INSUFFICIENT_PRIV, "Unprivileged user cannot fail resources"); controld_ack_event_directly(from_host, from_sys, NULL, op, pcmk__xe_id(xml_rsc)); lrmd_free_event(op); return; } if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) { crm_info("Failing resource %s...", rsc->id); fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_UNKNOWN_ERROR, "Simulated failure"); process_lrm_event(lrm_state, op, NULL, xml); op->rc = PCMK_OCF_OK; // The request to fail the resource succeeded lrmd_free_rsc_info(rsc); } else { crm_info("Cannot find/create resource in order to fail it..."); crm_log_xml_warn(xml, "bad input"); fake_op_status(lrm_state, op, PCMK_EXEC_ERROR, PCMK_OCF_UNKNOWN_ERROR, "Cannot fail unknown resource"); } controld_ack_event_directly(from_host, from_sys, NULL, op, pcmk__xe_id(xml_rsc)); lrmd_free_event(op); } static void handle_reprobe_op(lrm_state_t *lrm_state, xmlNode *msg, const char *from_sys, const char *from_host, const char *user_name, gboolean is_remote_node, bool reprobe_all_nodes) { crm_notice("Forcing the status of all resources to be redetected"); force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node, reprobe_all_nodes); if (!pcmk__strcase_any_of(from_sys, CRM_SYSTEM_PENGINE, CRM_SYSTEM_TENGINE, NULL)) { xmlNode *reply = pcmk__new_reply(msg, NULL); crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } pcmk__xml_free(reply); } } static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys) { char *op_key = NULL; char *meta_key = NULL; int call = 0; const char *call_id = NULL; const char *op_task = NULL; guint interval_ms = 0; gboolean in_progress = FALSE; xmlNode *params = pcmk__xe_first_child(input->xml, PCMK__XE_ATTRIBUTES, NULL, NULL); CRM_CHECK(params != NULL, return FALSE); meta_key = crm_meta_name(PCMK_XA_OPERATION); op_task = pcmk__xe_get(params, meta_key); free(meta_key); CRM_CHECK(op_task != NULL, return FALSE); meta_key = crm_meta_name(PCMK_META_INTERVAL); if (pcmk__xe_get_guint(params, meta_key, &interval_ms) != pcmk_rc_ok) { free(meta_key); return FALSE; } free(meta_key); op_key = pcmk__op_key(rsc->id, op_task, interval_ms); meta_key = crm_meta_name(PCMK__XA_CALL_ID); call_id = pcmk__xe_get(params, meta_key); free(meta_key); crm_debug("Scheduler requested op %s (call=%s) be cancelled", op_key, (call_id? 
call_id : "NA")); pcmk__scan_min_int(call_id, &call, 0); if (call == 0) { // Normal case when the scheduler cancels a recurring op in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE); } else { // Normal case when the scheduler cancels an orphan op in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE); } // Acknowledge cancellation operation if for a remote connection resource if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) { char *op_id = make_stop_id(rsc->id, call); if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) { crm_info("Nothing known about operation %d for %s", call, op_key); } controld_delete_action_history_by_key(rsc->id, lrm_state->node_name, op_key, call); send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task, from_host, from_sys); /* needed at least for cancellation of a remote operation */ if (lrm_state->active_ops != NULL) { g_hash_table_remove(lrm_state->active_ops, op_id); } free(op_id); } free(op_key); return TRUE; } static void do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host, bool crm_rsc_delete, const char *user_name) { bool unregister = true; int cib_rc = controld_delete_resource_history(rsc->id, lrm_state->node_name, user_name, cib_dryrun|cib_sync_call); if (cib_rc != pcmk_rc_ok) { lrmd_event_data_t *op = NULL; op = construct_op(lrm_state, input->xml, rsc->id, PCMK_ACTION_DELETE); /* These are resource clean-ups, not actions, so no exit reason is * needed. */ lrmd__set_result(op, pcmk_rc2ocf(cib_rc), PCMK_EXEC_ERROR, NULL); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc->id); lrmd_free_event(op); return; } if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) { unregister = false; } delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, user_name, input, unregister, true); } // User data for asynchronous metadata execution struct metadata_cb_data { lrmd_rsc_info_t *rsc; // Copy of resource information xmlNode *input_xml; // Copy of FSA input XML }; static struct metadata_cb_data * new_metadata_cb_data(lrmd_rsc_info_t *rsc, xmlNode *input_xml) { struct metadata_cb_data *data = NULL; data = pcmk__assert_alloc(1, sizeof(struct metadata_cb_data)); data->input_xml = pcmk__xml_copy(NULL, input_xml); data->rsc = lrmd_copy_rsc_info(rsc); return data; } static void free_metadata_cb_data(struct metadata_cb_data *data) { lrmd_free_rsc_info(data->rsc); pcmk__xml_free(data->input_xml); free(data); } /*! 
* \internal * \brief Execute an action after metadata has been retrieved * * \param[in] pid Ignored * \param[in] result Result of metadata action * \param[in] user_data Metadata callback data */ static void metadata_complete(int pid, const pcmk__action_result_t *result, void *user_data) { struct metadata_cb_data *data = (struct metadata_cb_data *) user_data; struct ra_metadata_s *md = NULL; lrm_state_t *lrm_state = controld_get_executor_state(lrm_op_target(data->input_xml), false); if ((lrm_state != NULL) && pcmk__result_ok(result)) { md = controld_cache_metadata(lrm_state->metadata_cache, data->rsc, result->action_stdout); } if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) { do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md); } free_metadata_cb_data(data); } /* A_LRM_INVOKE */ void do_lrm_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { lrm_state_t *lrm_state = NULL; const char *crm_op = NULL; const char *from_sys = NULL; const char *from_host = NULL; const char *operation = NULL; ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); const char *user_name = NULL; const char *target_node = lrm_op_target(input->xml); gboolean is_remote_node = FALSE; bool crm_rsc_delete = FALSE; // Message routed to the local node is targeting a specific, non-local node is_remote_node = !controld_is_local_node(target_node); lrm_state = controld_get_executor_state(target_node, false); if ((lrm_state == NULL) && is_remote_node) { crm_err("Failing action because local node has never had connection to remote node %s", target_node); synthesize_lrmd_failure(NULL, input->xml, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, "Local node has no connection to remote"); return; } pcmk__assert(lrm_state != NULL); user_name = pcmk__update_acl_user(input->msg, PCMK__XA_CRM_USER, NULL); crm_op = pcmk__xe_get(input->msg, PCMK__XA_CRM_TASK); from_sys = pcmk__xe_get(input->msg, PCMK__XA_CRM_SYS_FROM); if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { from_host = pcmk__xe_get(input->msg, PCMK__XA_SRC); } if (pcmk__str_eq(crm_op, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) { if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { crm_rsc_delete = TRUE; // from crm_resource } operation = PCMK_ACTION_DELETE; } else if (input->xml != NULL) { operation = pcmk__xe_get(input->xml, PCMK_XA_OPERATION); } CRM_CHECK(!pcmk__str_empty(crm_op) || !pcmk__str_empty(operation), return); crm_trace("'%s' execution request from %s as %s user", pcmk__s(crm_op, operation), pcmk__s(from_sys, "unknown subsystem"), pcmk__s(user_name, "current")); if (pcmk__str_eq(crm_op, CRM_OP_LRM_FAIL, pcmk__str_none)) { fail_lrm_resource(input->xml, lrm_state, user_name, from_host, from_sys); } else if (pcmk__str_eq(crm_op, CRM_OP_REPROBE, pcmk__str_none) || pcmk__str_eq(operation, CRM_OP_REPROBE, pcmk__str_none)) { const char *raw_target = NULL; if (input->xml != NULL) { // For CRM_OP_REPROBE, a NULL target means we're targeting all nodes raw_target = pcmk__xe_get(input->xml, PCMK__META_ON_NODE); } handle_reprobe_op(lrm_state, input->msg, from_sys, from_host, user_name, is_remote_node, (raw_target == NULL)); } else if (operation != NULL) { lrmd_rsc_info_t *rsc = NULL; xmlNode *xml_rsc = pcmk__xe_first_child(input->xml, PCMK_XE_PRIMITIVE, NULL, NULL); gboolean create_rsc = !pcmk__str_eq(operation, PCMK_ACTION_DELETE, pcmk__str_none); int rc; // We can't return anything meaningful without a resource ID 
CRM_CHECK((xml_rsc != NULL) && (pcmk__xe_id(xml_rsc) != NULL), return); rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc); if (rc == -ENOTCONN) { synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, "Not connected to remote executor"); return; } else if ((rc < 0) && !create_rsc) { /* Delete of malformed or nonexistent resource * (deleting something that does not exist is a success) */ crm_debug("Not registering resource '%s' for a %s event " QB_XS " get-rc=%d (%s) transition-key=%s", pcmk__xe_id(xml_rsc), operation, rc, pcmk_strerror(rc), pcmk__xe_id(input->xml)); delete_rsc_entry(lrm_state, input, pcmk__xe_id(xml_rsc), NULL, pcmk_ok, user_name, true); return; } else if (rc == -EINVAL) { // Resource operation on malformed resource crm_err("Invalid resource definition for %s", pcmk__xe_id(xml_rsc)); crm_log_xml_warn(input->msg, "invalid resource"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR, PCMK_OCF_NOT_CONFIGURED, // fatal error "Invalid resource definition"); return; } else if (rc < 0) { // Error communicating with the executor crm_err("Could not register resource '%s' with executor: %s " QB_XS " rc=%d", pcmk__xe_id(xml_rsc), pcmk_strerror(rc), rc); crm_log_xml_warn(input->msg, "failed registration"); synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR, PCMK_OCF_INVALID_PARAM, // hard error "Could not register resource with executor"); return; } if (pcmk__str_eq(operation, PCMK_ACTION_CANCEL, pcmk__str_none)) { if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) { crm_log_xml_warn(input->xml, "Bad command"); } } else if (pcmk__str_eq(operation, PCMK_ACTION_DELETE, pcmk__str_none)) { do_lrm_delete(input, lrm_state, rsc, from_sys, from_host, crm_rsc_delete, user_name); } else { struct ra_metadata_s *md = NULL; /* Getting metadata from cache is OK except for start actions -- * always refresh from the agent for those, in case the resource * agent was updated. * * @TODO Only refresh metadata for starts if the agent actually * changed (using something like inotify, or a hash or modification * time of the agent executable). */ if (strcmp(operation, PCMK_ACTION_START) != 0) { md = controld_get_rsc_metadata(lrm_state, rsc, controld_metadata_from_cache); } if ((md == NULL) && crm_op_needs_metadata(rsc->standard, operation)) { /* Most likely, we'll need the agent metadata to record the * pending operation and the operation result. Get it now rather * than wait until then, so the metadata action doesn't eat into * the real action's timeout. * * @TODO Metadata is retrieved via direct execution of the * agent, which has a couple of related issues: the executor * should execute agents, not the controller; and metadata for * Pacemaker Remote nodes should be collected on those nodes, * not locally. */ struct metadata_cb_data *data = NULL; data = new_metadata_cb_data(rsc, input->xml); crm_info("Retrieving metadata for %s (%s%s%s:%s) asynchronously", rsc->id, rsc->standard, ((rsc->provider == NULL)? "" : ":"), ((rsc->provider == NULL)? 
"" : rsc->provider), rsc->type); (void) lrmd__metadata_async(rsc, metadata_complete, (void *) data); } else { do_lrm_rsc_op(lrm_state, rsc, input->xml, md); } } lrmd_free_rsc_info(rsc); } else { crm_err("Invalid execution request: unknown command '%s' (bug?)", crm_op); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } static lrmd_event_data_t * construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op, const char *rsc_id, const char *operation) { lrmd_event_data_t *op = NULL; const char *op_delay = NULL; const char *op_timeout = NULL; GHashTable *params = NULL; xmlNode *primitive = NULL; const char *class = NULL; const char *transition = NULL; pcmk__assert((rsc_id != NULL) && (operation != NULL)); op = lrmd_new_event(rsc_id, operation, 0); op->type = lrmd_event_exec_complete; op->timeout = 0; op->start_delay = 0; lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL); if (rsc_op == NULL) { CRM_LOG_ASSERT(pcmk__str_eq(operation, PCMK_ACTION_STOP, pcmk__str_casei)); op->user_data = NULL; /* the stop_all_resources() case * by definition there is no DC (or they'd be shutting * us down). * So we should put our version here. */ op->params = pcmk__strkey_table(free, free); pcmk__insert_dup(op->params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); crm_trace("Constructed %s op for %s", operation, rsc_id); return op; } params = xml2list(rsc_op); g_hash_table_remove(params, CRM_META "_" PCMK__META_OP_TARGET_RC); op_delay = crm_meta_value(params, PCMK_META_START_DELAY); pcmk__scan_min_int(op_delay, &op->start_delay, 0); op_timeout = crm_meta_value(params, PCMK_META_TIMEOUT); pcmk__scan_min_int(op_timeout, &op->timeout, 0); if (pcmk__guint_from_hash(params, CRM_META "_" PCMK_META_INTERVAL, 0, &(op->interval_ms)) != pcmk_rc_ok) { op->interval_ms = 0; } /* Use pcmk_monitor_timeout instead of meta timeout for stonith recurring monitor, if set */ primitive = pcmk__xe_first_child(rsc_op, PCMK_XE_PRIMITIVE, NULL, NULL); class = pcmk__xe_get(primitive, PCMK_XA_CLASS); if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params) && pcmk__str_eq(operation, PCMK_ACTION_MONITOR, pcmk__str_casei) && (op->interval_ms > 0)) { op_timeout = g_hash_table_lookup(params, "pcmk_monitor_timeout"); if (op_timeout != NULL) { long long timeout_ms = crm_get_msec(op_timeout); op->timeout = (int) QB_MIN(timeout_ms, INT_MAX); } } if (!pcmk__str_eq(operation, PCMK_ACTION_STOP, pcmk__str_casei)) { op->params = params; } else { rsc_history_t *entry = NULL; if (lrm_state) { entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); } /* If we do not have stop parameters cached, use * whatever we are given */ if (!entry || !entry->stop_params) { op->params = params; } else { /* Copy the cached parameter list so that we stop the resource * with the old attributes, not the new ones */ op->params = pcmk__strkey_table(free, free); g_hash_table_foreach(params, copy_meta_keys, op->params); g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params); g_hash_table_destroy(params); params = NULL; } } /* sanity */ if (op->timeout <= 0) { op->timeout = op->interval_ms; } if (op->start_delay < 0) { op->start_delay = 0; } transition = pcmk__xe_get(rsc_op, PCMK__XA_TRANSITION_KEY); CRM_CHECK(transition != NULL, return op); op->user_data = pcmk__str_copy(transition); if (op->interval_ms != 0) { if (pcmk__strcase_any_of(operation, PCMK_ACTION_START, PCMK_ACTION_STOP, NULL)) { crm_err("Start and Stop actions cannot have an interval: %u", op->interval_ms); op->interval_ms = 0; } } crm_trace("Constructed %s op 
for %s: interval=%u", operation, rsc_id, op->interval_ms); return op; } /*! * \internal * \brief Send a (synthesized) event result * * Reply with a synthesized event result directly, as opposed to going through * the executor. * * \param[in] to_host Host to send result to * \param[in] to_sys IPC name to send result (NULL for transition engine) * \param[in] rsc Type information about resource the result is for * \param[in,out] op Event with result to send * \param[in] rsc_id ID of resource the result is for */ void controld_ack_event_directly(const char *to_host, const char *to_sys, const lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, const char *rsc_id) { xmlNode *reply = NULL; xmlNode *update, *iter; pcmk__node_status_t *peer = NULL; CRM_CHECK(op != NULL, return); if (op->rsc_id == NULL) { // op->rsc_id is a (const char *) but lrmd_free_event() frees it pcmk__assert(rsc_id != NULL); op->rsc_id = pcmk__str_copy(rsc_id); } if (to_sys == NULL) { to_sys = CRM_SYSTEM_TENGINE; } peer = controld_get_local_node_status(); update = create_node_state_update(peer, node_update_none, NULL, __func__); iter = pcmk__xe_create(update, PCMK__XE_LRM); crm_xml_add(iter, PCMK_XA_ID, controld_globals.our_uuid); iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCES); iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCE); crm_xml_add(iter, PCMK_XA_ID, op->rsc_id); controld_add_resource_history_xml(iter, rsc, op, controld_globals.cluster->priv->node_name); /* We don't have the original message ID, so use "direct-ack" (we just need * something non-NULL for this to create a reply) * * @TODO It would be better to use the server, message ID, and task from the * original request when callers have it available */ reply = pcmk__new_message(pcmk_ipc_controld, "direct-ack", CRM_SYSTEM_LRMD, to_host, to_sys, CRM_OP_INVOKE_LRM, update); crm_log_xml_trace(update, "[direct ACK]"); crm_debug("ACK'ing resource op " PCMK__OP_FMT " from %s: %s", op->rsc_id, op->op_type, op->interval_ms, op->user_data, pcmk__xe_get(reply, PCMK_XA_REFERENCE)); if (relay_message(reply, TRUE) == FALSE) { crm_log_xml_err(reply, "Unable to route reply"); } pcmk__xml_free(update); pcmk__xml_free(reply); } gboolean verify_stopped(enum crmd_fsa_state cur_state, int log_level) { gboolean res = TRUE; GList *lrm_state_list = lrm_state_get_list(); GList *state_entry; for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) { lrm_state_t *lrm_state = state_entry->data; if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) { /* keep iterating through all even when false is returned */ res = FALSE; } } controld_set_fsa_input_flags(R_SENT_RSC_STOP); g_list_free(lrm_state_list); lrm_state_list = NULL; return res; } struct stop_recurring_action_s { lrmd_rsc_info_t *rsc; lrm_state_t *lrm_state; }; static gboolean stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; struct stop_recurring_action_s *event = user_data; active_op_t *op = value; if ((op->interval_ms != 0) && pcmk__str_eq(op->rsc_id, event->rsc->id, pcmk__str_none)) { crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key); remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE); } return remove; } static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data) { gboolean remove = FALSE; lrm_state_t *lrm_state = user_data; active_op_t *op = value; if (op->interval_ms != 0) { crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (const 
char *) key); remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE); } return remove; } /*! * \internal * \brief Check whether recurring actions should be cancelled before an action * * \param[in] rsc_id Resource that action is for * \param[in] action Action being performed * \param[in] interval_ms Operation interval of \p action (in milliseconds) * * \return true if recurring actions should be cancelled, otherwise false */ static bool should_cancel_recurring(const char *rsc_id, const char *action, guint interval_ms) { if (is_remote_lrmd_ra(NULL, NULL, rsc_id) && (interval_ms == 0) && (strcmp(action, PCMK_ACTION_MIGRATE_TO) == 0)) { /* Don't stop monitoring a migrating Pacemaker Remote connection * resource until the entire migration has completed. We must detect if * the connection is unexpectedly severed, even during a migration. */ return false; } // Cancel recurring actions before changing resource state return (interval_ms == 0) && !pcmk__str_any_of(action, PCMK_ACTION_MONITOR, PCMK_ACTION_NOTIFY, NULL); } /*! * \internal * \brief Check whether an action should not be performed at this time * * \param[in] operation Action to be performed * * \return Readable description of why action should not be performed, * or NULL if it should be performed */ static const char * should_nack_action(const char *action) { if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN) && pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) { register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); return "Not attempting start due to shutdown in progress"; } switch (controld_globals.fsa_state) { case S_NOT_DC: case S_POLICY_ENGINE: // Recalculating case S_TRANSITION_ENGINE: break; default: if (!pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) { return "Controller cannot attempt actions at this time"; } break; } return NULL; } static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg, struct ra_metadata_s *md) { int rc; int call_id = 0; char *op_id = NULL; lrmd_event_data_t *op = NULL; fsa_data_t *msg_data = NULL; const char *transition = NULL; const char *operation = NULL; const char *nack_reason = NULL; CRM_CHECK((rsc != NULL) && (msg != NULL), return); operation = pcmk__xe_get(msg, PCMK_XA_OPERATION); CRM_CHECK(!pcmk__str_empty(operation), return); transition = pcmk__xe_get(msg, PCMK__XA_TRANSITION_KEY); if (pcmk__str_empty(transition)) { crm_log_xml_err(msg, "Missing transition number"); } if (lrm_state == NULL) { // This shouldn't be possible, but provide a failsafe just in case crm_err("Cannot execute %s of %s: No executor connection " QB_XS " transition_key=%s", operation, rsc->id, pcmk__s(transition, "")); synthesize_lrmd_failure(NULL, msg, PCMK_EXEC_INVALID, PCMK_OCF_UNKNOWN_ERROR, "No executor connection"); return; } if (pcmk__str_any_of(operation, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT, NULL)) { /* Pre-2.1.0 DCs will schedule reload actions only, and 2.1.0+ DCs * will schedule reload-agent actions only. In either case, we need * to map that to whatever the resource agent actually supports. * Default to the OCF 1.1 name. 
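 *
 * In other words (summarizing the check below): if the cached metadata
 * indicates legacy reload support (ra_supports_legacy_reload), map to
 * PCMK_ACTION_RELOAD; otherwise map to PCMK_ACTION_RELOAD_AGENT.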
*/ if ((md != NULL) && pcmk_is_set(md->ra_flags, ra_supports_legacy_reload)) { operation = PCMK_ACTION_RELOAD; } else { operation = PCMK_ACTION_RELOAD_AGENT; } } op = construct_op(lrm_state, msg, rsc->id, operation); CRM_CHECK(op != NULL, return); if (should_cancel_recurring(rsc->id, operation, op->interval_ms)) { guint removed = 0; struct stop_recurring_action_s data; data.rsc = rsc; data.lrm_state = lrm_state; removed = g_hash_table_foreach_remove(lrm_state->active_ops, stop_recurring_action_by_rsc, &data); if (removed) { crm_debug("Stopped %u recurring operation%s in preparation for " PCMK__OP_FMT, removed, pcmk__plural_s(removed), rsc->id, operation, op->interval_ms); } } nack_reason = should_nack_action(operation); if (nack_reason != NULL) { crm_notice("Not requesting local execution of %s operation for %s on %s" " in state %s: %s", pcmk__readable_action(op->op_type, op->interval_ms), rsc->id, lrm_state->node_name, fsa_state2string(controld_globals.fsa_state), nack_reason); lrmd__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_INVALID, nack_reason); controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); lrmd_free_event(op); free(op_id); return; } crm_notice("Requesting local execution of %s operation for %s on %s " QB_XS " transition %s", pcmk__readable_action(op->op_type, op->interval_ms), rsc->id, lrm_state->node_name, pcmk__s(transition, "")); controld_record_pending_op(lrm_state->node_name, rsc, op); op_id = pcmk__op_key(rsc->id, op->op_type, op->interval_ms); if (op->interval_ms > 0) { /* cancel it so we can then restart it without conflict */ cancel_op_key(lrm_state, rsc, op_id, FALSE); } rc = controld_execute_resource_agent(lrm_state, rsc->id, op->op_type, op->user_data, op->interval_ms, op->timeout, op->start_delay, op->params, &call_id); if (rc == pcmk_rc_ok) { /* record all operations so we can wait * for them to complete during shutdown */ char *call_id_s = make_stop_id(rsc->id, call_id); active_op_t *pending = NULL; pending = pcmk__assert_alloc(1, sizeof(active_op_t)); crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s); pending->call_id = call_id; pending->interval_ms = op->interval_ms; pending->op_type = pcmk__str_copy(operation); pending->op_key = pcmk__str_copy(op_id); pending->rsc_id = pcmk__str_copy(rsc->id); pending->start_time = time(NULL); pending->user_data = pcmk__str_copy(op->user_data); pcmk__xe_get_time(msg, PCMK_OPT_SHUTDOWN_LOCK, &(pending->lock_time)); g_hash_table_replace(lrm_state->active_ops, call_id_s, pending); if ((op->interval_ms > 0) && (op->start_delay > START_DELAY_THRESHOLD)) { int target_rc = PCMK_OCF_OK; crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id); decode_transition_key(op->user_data, NULL, NULL, NULL, &target_rc); lrmd__set_result(op, target_rc, PCMK_EXEC_DONE, NULL); controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id); } pending->params = op->params; op->params = NULL; } else if (lrm_state_is_local(lrm_state)) { crm_err("Could not initiate %s action for resource %s locally: %s " QB_XS " rc=%d", operation, rsc->id, pcmk_rc_str(rc), rc); fake_op_status(lrm_state, op, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, pcmk_rc_str(rc)); process_lrm_event(lrm_state, op, NULL, NULL); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } else { crm_err("Could not initiate %s action for resource %s remotely on %s: " "%s " QB_XS " rc=%d", operation, rsc->id, lrm_state->node_name, pcmk_rc_str(rc), rc); fake_op_status(lrm_state, op, PCMK_EXEC_NOT_CONNECTED, PCMK_OCF_UNKNOWN_ERROR, 
pcmk_rc_str(rc)); process_lrm_event(lrm_state, op, NULL, NULL); } free(op_id); lrmd_free_event(op); } static char * unescape_newlines(const char *string) { char *pch = NULL; char *ret = NULL; static const char *escaped_newline = "\\n"; if (!string) { return NULL; } ret = pcmk__str_copy(string); pch = strstr(ret, escaped_newline); while (pch != NULL) { /* Replace newline escape pattern with actual newline (and a space so we * don't have to shuffle the rest of the buffer) */ pch[0] = '\n'; pch[1] = ' '; pch = strstr(pch, escaped_newline); } return ret; } static bool did_lrm_rsc_op_fail(lrm_state_t *lrm_state, const char * rsc_id, const char * op_type, guint interval_ms) { rsc_history_t *entry = NULL; CRM_CHECK(lrm_state != NULL, return FALSE); CRM_CHECK(rsc_id != NULL, return FALSE); CRM_CHECK(op_type != NULL, return FALSE); entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id); if (entry == NULL || entry->failed == NULL) { return FALSE; } if (pcmk__str_eq(entry->failed->rsc_id, rsc_id, pcmk__str_none) && pcmk__str_eq(entry->failed->op_type, op_type, pcmk__str_casei) && entry->failed->interval_ms == interval_ms) { return TRUE; } return FALSE; } /*! * \internal * \brief Log the result of an executor action (actual or synthesized) * * \param[in] op Executor action to log result for * \param[in] op_key Operation key for action * \param[in] node_name Name of node action was performed on, if known * \param[in] confirmed Whether to log that graph action was confirmed */ static void log_executor_event(const lrmd_event_data_t *op, const char *op_key, const char *node_name, gboolean confirmed) { int log_level = LOG_ERR; GString *str = g_string_sized_new(100); // reasonable starting size pcmk__g_strcat(str, "Result of ", pcmk__readable_action(op->op_type, op->interval_ms), " operation for ", op->rsc_id, NULL); if (node_name != NULL) { pcmk__g_strcat(str, " on ", node_name, NULL); } switch (op->op_status) { case PCMK_EXEC_DONE: log_level = LOG_NOTICE; pcmk__g_strcat(str, ": ", crm_exit_str((crm_exit_t) op->rc), NULL); break; case PCMK_EXEC_TIMEOUT: pcmk__g_strcat(str, ": ", pcmk_exec_status_str(op->op_status), " after ", pcmk__readable_interval(op->timeout), NULL); break; case PCMK_EXEC_CANCELLED: log_level = LOG_INFO; pcmk__g_strcat(str, ": ", pcmk_exec_status_str(op->op_status), NULL); break; default: pcmk__g_strcat(str, ": ", pcmk_exec_status_str(op->op_status), NULL); break; } if ((op->exit_reason != NULL) && ((op->op_status != PCMK_EXEC_DONE) || (op->rc != PCMK_OCF_OK))) { pcmk__g_strcat(str, " (", op->exit_reason, ")", NULL); } g_string_append(str, " " QB_XS); g_string_append_printf(str, " graph action %sconfirmed; call=%d key=%s", (confirmed? "" : "un"), op->call_id, op_key); if (op->op_status == PCMK_EXEC_DONE) { g_string_append_printf(str, " rc=%d", op->rc); } do_crm_log(log_level, "%s", str->str); g_string_free(str, TRUE); /* The services library has already logged the output at info or debug * level, so just raise to notice if it looks like a failure. 
*/ if ((op->output != NULL) && (op->rc != PCMK_OCF_OK)) { char *prefix = crm_strdup_printf(PCMK__OP_FMT "@%s output", op->rsc_id, op->op_type, op->interval_ms, node_name); crm_log_output(LOG_NOTICE, prefix, op->output); free(prefix); } } void process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op, active_op_t *pending, const xmlNode *action_xml) { char *op_id = NULL; char *op_key = NULL; gboolean remove = FALSE; gboolean removed = FALSE; bool need_direct_ack = FALSE; lrmd_rsc_info_t *rsc = NULL; const char *node_name = NULL; CRM_CHECK(op != NULL, return); CRM_CHECK(op->rsc_id != NULL, return); // Remap new status codes for older DCs if (compare_version(controld_globals.dc_version, "3.2.0") < 0) { switch (op->op_status) { case PCMK_EXEC_NOT_CONNECTED: lrmd__set_result(op, PCMK_OCF_CONNECTION_DIED, PCMK_EXEC_ERROR, op->exit_reason); break; case PCMK_EXEC_INVALID: lrmd__set_result(op, CRM_DIRECT_NACK_RC, PCMK_EXEC_ERROR, op->exit_reason); break; default: break; } } op_id = make_stop_id(op->rsc_id, op->call_id); op_key = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms); // Get resource info if available (from executor state or action XML) if (lrm_state) { rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0); } if ((rsc == NULL) && action_xml) { xmlNode *xml = pcmk__xe_first_child(action_xml, PCMK_XE_PRIMITIVE, NULL, NULL); const char *standard = pcmk__xe_get(xml, PCMK_XA_CLASS); const char *provider = pcmk__xe_get(xml, PCMK_XA_PROVIDER); const char *type = pcmk__xe_get(xml, PCMK_XA_TYPE); if (standard && type) { crm_info("%s agent information not cached, using %s%s%s:%s from action XML", op->rsc_id, standard, (provider? ":" : ""), (provider? provider : ""), type); rsc = lrmd_new_rsc_info(op->rsc_id, standard, provider, type); } else { crm_err("Can't process %s result because %s agent information not cached or in XML", op_key, op->rsc_id); } } // Get node name if available (from executor state or action XML) if (lrm_state) { node_name = lrm_state->node_name; } else if (action_xml) { node_name = pcmk__xe_get(action_xml, PCMK__META_ON_NODE); } if(pending == NULL) { remove = TRUE; if (lrm_state) { pending = g_hash_table_lookup(lrm_state->active_ops, op_id); } } if (op->op_status == PCMK_EXEC_ERROR) { switch(op->rc) { case PCMK_OCF_NOT_RUNNING: case PCMK_OCF_RUNNING_PROMOTED: case PCMK_OCF_DEGRADED: case PCMK_OCF_DEGRADED_PROMOTED: // Leave it to the TE/scheduler to decide if this is an error op->op_status = PCMK_EXEC_DONE; break; default: /* Nothing to do */ break; } } if (op->op_status != PCMK_EXEC_CANCELLED) { /* We might not record the result, so directly acknowledge it to the * originator instead, so it doesn't time out waiting for the result * (especially important if part of a transition). */ need_direct_ack = TRUE; if (controld_action_is_recordable(op->op_type)) { if (node_name && rsc) { // We should record the result, and happily, we can time_t lock_time = (pending == NULL)? 0 : pending->lock_time; controld_update_resource_history(node_name, rsc, op, lock_time); need_direct_ack = FALSE; } else if (op->rsc_deleted) { /* We shouldn't record the result (likely the resource was * refreshed, cleaned, or removed while this operation was * in flight). */ crm_notice("Not recording %s result in CIB because " "resource information was removed since it was initiated", op_key); } else { /* This shouldn't be possible; the executor didn't consider the * resource deleted, but we couldn't find resource or node * information. 
*/ crm_err("Unable to record %s result in CIB: %s", op_key, (node_name? "No resource information" : "No node name")); } } } else if (op->interval_ms == 0) { /* A non-recurring operation was cancelled. Most likely, the * never-initiated action was removed from the executor's pending * operations list upon resource removal. */ need_direct_ack = TRUE; } else if (pending == NULL) { /* This recurring operation was cancelled, but was not pending. No * transition actions are waiting on it, nothing needs to be done. */ } else if (op->user_data == NULL) { /* This recurring operation was cancelled and pending, but we don't * have a transition key. This should never happen. */ crm_err("Recurring operation %s was cancelled without transition information", op_key); } else if (pcmk_is_set(pending->flags, active_op_remove)) { /* This recurring operation was cancelled (by us) and pending, and we * have been waiting for it to finish. */ if (lrm_state) { controld_delete_action_history(op); } /* Directly acknowledge failed recurring actions here. The above call to * controld_delete_action_history() will not erase any corresponding * last_failure entry, which means that the DC won't confirm the * cancellation via process_op_deletion(), and the transition would * otherwise wait for the action timer to pop. */ if (did_lrm_rsc_op_fail(lrm_state, pending->rsc_id, pending->op_type, pending->interval_ms)) { need_direct_ack = TRUE; } } else if (op->rsc_deleted) { /* This recurring operation was cancelled (but not by us, and the * executor does not have resource information, likely due to resource * cleanup, refresh, or removal) and pending. */ crm_debug("Recurring op %s was cancelled due to resource deletion", op_key); need_direct_ack = TRUE; } else { /* This recurring operation was cancelled (but not by us, likely by the * executor before stopping the resource) and pending. We don't need to * do anything special. */ } if (need_direct_ack) { controld_ack_event_directly(NULL, NULL, NULL, op, op->rsc_id); } if(remove == FALSE) { /* The caller will do this afterwards, but keep the logging consistent */ removed = TRUE; } else if (lrm_state && ((op->interval_ms == 0) || (op->op_status == PCMK_EXEC_CANCELLED))) { gboolean found = g_hash_table_remove(lrm_state->active_ops, op_id); if (op->interval_ms != 0) { removed = TRUE; } else if (found) { removed = TRUE; crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed", op_key, op->call_id, op_id, g_hash_table_size(lrm_state->active_ops)); } } log_executor_event(op, op_key, node_name, removed); if (lrm_state) { if (!pcmk__str_eq(op->op_type, PCMK_ACTION_META_DATA, pcmk__str_casei)) { crmd_alert_resource_op(lrm_state->node_name, op); } else if (rsc && (op->rc == PCMK_OCF_OK)) { char *metadata = unescape_newlines(op->output); controld_cache_metadata(lrm_state->metadata_cache, rsc, metadata); free(metadata); } } if (op->rsc_deleted) { crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); if (lrm_state) { delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL, true); } } /* If a shutdown was escalated while operations were pending, * then the FSA will be stalled right now... 
allow it to continue */ controld_trigger_fsa(); if (lrm_state && rsc) { update_history_cache(lrm_state, rsc, op); } lrmd_free_rsc_info(rsc); free(op_key); free(op_id); } diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c index cfeb78a860..c142632116 100644 --- a/daemons/controld/controld_join_dc.c +++ b/daemons/controld/controld_join_dc.c @@ -1,1097 +1,1097 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include // PRIu32 #include // bool, true, false #include // NULL #include // free(), etc. #include // gboolean, etc. #include // xmlNode #include #include #include #include static char *max_generation_from = NULL; static xmlNodePtr max_generation_xml = NULL; /*! * \internal * \brief Nodes from which a CIB sync has failed since the peer joined * * This table is of the form (node_name -> join_id). \p node_name is * the name of a client node from which a CIB \p sync_from() call has failed in * \p do_dc_join_finalize() since the client joined the cluster as a peer. * \p join_id is the ID of the join round in which the \p sync_from() failed, * and is intended for use in nack log messages. */ static GHashTable *failed_sync_nodes = NULL; void finalize_join_for(gpointer key, gpointer value, gpointer user_data); void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data); gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source); /* Numeric counter used to identify join rounds (an unsigned int would be * appropriate, except we get and set it in XML as int) */ static int current_join_id = 0; /*! * \internal * \brief Get log-friendly string equivalent of a controller group join phase * * \param[in] phase Join phase * * \return Log-friendly string equivalent of \p phase */ static const char * join_phase_text(enum controld_join_phase phase) { switch (phase) { case controld_join_nack: return "nack"; case controld_join_none: return "none"; case controld_join_welcomed: return "welcomed"; case controld_join_integrated: return "integrated"; case controld_join_finalized: return "finalized"; case controld_join_confirmed: return "confirmed"; default: return "invalid"; } } /*! * \internal * \brief Destroy the hash table containing failed sync nodes */ void controld_destroy_failed_sync_table(void) { if (failed_sync_nodes != NULL) { g_hash_table_destroy(failed_sync_nodes); failed_sync_nodes = NULL; } } /*! * \internal * \brief Remove a node from the failed sync nodes table if present * * \param[in] node_name Node name to remove */ void controld_remove_failed_sync_node(const char *node_name) { if (failed_sync_nodes != NULL) { g_hash_table_remove(failed_sync_nodes, (gchar *) node_name); } } /*! * \internal * \brief Add to a hash table a node whose CIB failed to sync * * \param[in] node_name Name of node whose CIB failed to sync * \param[in] join_id Join round when the failure occurred */ static void record_failed_sync_node(const char *node_name, gint join_id) { if (failed_sync_nodes == NULL) { failed_sync_nodes = pcmk__strikey_table(g_free, NULL); } /* If the node is already in the table then we failed to nack it during the * filter offer step */ CRM_LOG_ASSERT(g_hash_table_insert(failed_sync_nodes, g_strdup(node_name), GINT_TO_POINTER(join_id))); } /*! 
* \internal * \brief Look up a node name in the failed sync table * * \param[in] node_name Name of node to look up * \param[out] join_id Where to store the join ID of when the sync failed * * \return Standard Pacemaker return code. Specifically, \p pcmk_rc_ok if the * node name was found, or \p pcmk_rc_node_unknown otherwise. * \note \p *join_id is set to -1 if the node is not found. */ static int lookup_failed_sync_node(const char *node_name, gint *join_id) { *join_id = -1; if (failed_sync_nodes != NULL) { gpointer result = g_hash_table_lookup(failed_sync_nodes, (gchar *) node_name); if (result != NULL) { *join_id = GPOINTER_TO_INT(result); return pcmk_rc_ok; } } return pcmk_rc_node_unknown; } void crm_update_peer_join(const char *source, pcmk__node_status_t *node, enum controld_join_phase phase) { enum controld_join_phase last = controld_get_join_phase(node); CRM_CHECK(node != NULL, return); /* Remote nodes do not participate in joins */ if (pcmk_is_set(node->flags, pcmk__node_status_remote)) { return; } if (phase == last) { crm_trace("Node %s join-%d phase is still %s " QB_XS " nodeid=%" PRIu32 " source=%s", node->name, current_join_id, join_phase_text(last), node->cluster_layer_id, source); return; } if ((phase <= controld_join_none) || (phase == (last + 1))) { struct controld_node_status_data *data = NULL; if (node->user_data == NULL) { node->user_data = pcmk__assert_alloc(1, sizeof(struct controld_node_status_data)); } data = node->user_data; data->join_phase = phase; crm_trace("Node %s join-%d phase is now %s (was %s) " QB_XS " nodeid=%" PRIu32 " source=%s", node->name, current_join_id, join_phase_text(phase), join_phase_text(last), node->cluster_layer_id, source); return; } crm_warn("Rejecting join-%d phase update for node %s because can't go from " "%s to %s " QB_XS " nodeid=%" PRIu32 " source=%s", current_join_id, node->name, join_phase_text(last), join_phase_text(phase), node->cluster_layer_id, source); } static void start_join_round(void) { GHashTableIter iter; pcmk__node_status_t *peer = NULL; crm_debug("Starting new join round join-%d", current_join_id); g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { crm_update_peer_join(__func__, peer, controld_join_none); } if (max_generation_from != NULL) { free(max_generation_from); max_generation_from = NULL; } if (max_generation_xml != NULL) { pcmk__xml_free(max_generation_xml); max_generation_xml = NULL; } controld_clear_fsa_input_flags(R_HAVE_CIB); } /*! * \internal * \brief Create a join message from the DC * * \param[in] join_op Join operation name * \param[in] host_to Recipient of message */ static xmlNode * create_dc_message(const char *join_op, const char *host_to) { xmlNode *msg = pcmk__new_request(pcmk_ipc_controld, CRM_SYSTEM_DC, host_to, CRM_SYSTEM_CRMD, join_op, NULL); /* Identify which election this is a part of */ crm_xml_add_int(msg, PCMK__XA_JOIN_ID, current_join_id); /* Add a field specifying whether the DC is shutting down. This keeps the * joining node from fencing the old DC if it becomes the new DC. */ pcmk__xe_set_bool_attr(msg, PCMK__XA_DC_LEAVING, pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)); return msg; } static void join_make_offer(gpointer key, gpointer value, gpointer user_data) { /* @TODO We don't use user_data except to distinguish one particular call * from others. Make this clearer. 
*/ xmlNode *offer = NULL; pcmk__node_status_t *member = (pcmk__node_status_t *) value; pcmk__assert(member != NULL); if (!pcmk__cluster_is_node_active(member)) { crm_info("Not making join-%d offer to inactive node %s", current_join_id, pcmk__s(member->name, "with unknown name")); if ((member->expected == NULL) && pcmk__str_eq(member->state, PCMK__VALUE_LOST, pcmk__str_none)) { /* You would think this unsafe, but in fact this plus an * active resource is what causes it to be fenced. * * Yes, this does mean that any node that dies at the same * time as the old DC and is not (still) running a resource * won't be fenced. * * I'm not happy about this either. */ pcmk__update_peer_expected(__func__, member, CRMD_JOINSTATE_DOWN); } return; } if (member->name == NULL) { crm_info("Not making join-%d offer to node uuid %s with unknown name", current_join_id, member->xml_id); return; } if (controld_globals.membership_id != controld_globals.peer_seq) { controld_globals.membership_id = controld_globals.peer_seq; crm_info("Making join-%d offers based on membership event %llu", current_join_id, controld_globals.peer_seq); } if (user_data != NULL) { enum controld_join_phase phase = controld_get_join_phase(member); if (phase > controld_join_none) { crm_info("Not making join-%d offer to already known node %s (%s)", current_join_id, member->name, join_phase_text(phase)); return; } } crm_update_peer_join(__func__, (pcmk__node_status_t*) member, controld_join_none); offer = create_dc_message(CRM_OP_JOIN_OFFER, member->name); // Advertise our feature set so the joining node can bail if not compatible crm_xml_add(offer, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); crm_info("Sending join-%d offer to %s", current_join_id, member->name); pcmk__cluster_send_message(member, pcmk_ipc_controld, offer); pcmk__xml_free(offer); crm_update_peer_join(__func__, member, controld_join_welcomed); } /* A_DC_JOIN_OFFER_ALL */ void do_dc_join_offer_all(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int count; /* Reset everyone's status back to down or in_ccm in the CIB. * Any nodes that are active in the CIB but not in the cluster membership * will be seen as offline by the scheduler anyway.
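 *
 * Rough sketch of a successful peer's progression through the round that
 * starts here (each step driven by crm_update_peer_join()):
 *   none -> welcomed (offer sent) -> integrated (request accepted)
 *        -> finalized (ack sent)  -> confirmed (node history recorded)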
*/ current_join_id++; start_join_round(); update_dc(NULL); if (cause == C_HA_MESSAGE && current_input == I_NODE_JOIN) { crm_info("A new node joined the cluster"); } g_hash_table_foreach(pcmk__peer_cache, join_make_offer, NULL); count = crmd_join_phase_count(controld_join_welcomed); crm_info("Waiting on join-%d requests from %d outstanding node%s", current_join_id, count, pcmk__plural_s(count)); // Don't waste time by invoking the scheduler yet } /* A_DC_JOIN_OFFER_ONE */ void do_dc_join_offer_one(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { pcmk__node_status_t *member = NULL; ha_msg_input_t *welcome = NULL; int count; const char *join_to = NULL; if (msg_data->data == NULL) { crm_info("Making join-%d offers to any unconfirmed nodes " "because an unknown node joined", current_join_id); g_hash_table_foreach(pcmk__peer_cache, join_make_offer, &member); check_join_state(cur_state, __func__); return; } welcome = fsa_typed_data(fsa_dt_ha_msg); if (welcome == NULL) { // fsa_typed_data() already logged an error return; } join_to = pcmk__xe_get(welcome->msg, PCMK__XA_SRC); if (join_to == NULL) { crm_err("Can't make join-%d offer to unknown node", current_join_id); return; } member = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster_member); /* It is possible that a node will have been sick or starting up when the * original offer was made. However, it will either re-announce itself in * due course, or we can re-store the original offer on the client. */ crm_update_peer_join(__func__, member, controld_join_none); join_make_offer(NULL, member, NULL); /* If the offer isn't to the local node, make an offer to the local node as * well, to ensure the correct value for max_generation_from. */ if (!controld_is_local_node(join_to)) { member = controld_get_local_node_status(); join_make_offer(NULL, member, NULL); } /* This was a genuine join request; cancel any existing transition and * invoke the scheduler. 
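 * (abort_transition() below cancels any in-flight transition; per the note
 * further down, the scheduler itself is not invoked until the outstanding
 * join requests have been handled.)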
*/ abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Node join", NULL); count = crmd_join_phase_count(controld_join_welcomed); crm_info("Waiting on join-%d requests from %d outstanding node%s", current_join_id, count, pcmk__plural_s(count)); // Don't waste time by invoking the scheduler yet } static int compare_int_fields(xmlNode * left, xmlNode * right, const char *field) { const char *elem_l = pcmk__xe_get(left, field); const char *elem_r = pcmk__xe_get(right, field); long long int_elem_l; long long int_elem_r; int rc = pcmk_rc_ok; rc = pcmk__scan_ll(elem_l, &int_elem_l, -1LL); if (rc != pcmk_rc_ok) { // Shouldn't be possible crm_warn("Comparing current CIB %s as -1 " "because '%s' is not an integer", field, elem_l); } rc = pcmk__scan_ll(elem_r, &int_elem_r, -1LL); if (rc != pcmk_rc_ok) { // Shouldn't be possible crm_warn("Comparing joining node's CIB %s as -1 " "because '%s' is not an integer", field, elem_r); } if (int_elem_l < int_elem_r) { return -1; } else if (int_elem_l > int_elem_r) { return 1; } return 0; } /* A_DC_JOIN_PROCESS_REQ */ void do_dc_join_filter_offer(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { xmlNode *generation = NULL; int cmp = 0; int join_id = -1; int count = 0; gint value = 0; gboolean ack_nack_bool = TRUE; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); const char *join_from = pcmk__xe_get(join_ack->msg, PCMK__XA_SRC); const char *ref = pcmk__xe_get(join_ack->msg, PCMK_XA_REFERENCE); const char *join_version = pcmk__xe_get(join_ack->msg, PCMK_XA_CRM_FEATURE_SET); pcmk__node_status_t *join_node = NULL; if (join_from == NULL) { crm_err("Ignoring invalid join request without node name"); return; } join_node = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster_member); pcmk__xe_get_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id); if (join_id != current_join_id) { crm_debug("Ignoring join-%d request from %s because we are on join-%d", join_id, join_from, current_join_id); check_join_state(cur_state, __func__); return; } generation = join_ack->xml; if (max_generation_xml != NULL && generation != NULL) { int lpc = 0; const char *attributes[] = { PCMK_XA_ADMIN_EPOCH, PCMK_XA_EPOCH, PCMK_XA_NUM_UPDATES, }; /* It's not obvious that join_ack->xml is the PCMK__XE_GENERATION_TUPLE * element from the join client. The "if" guard is for clarity. */ if (pcmk__xe_is(generation, PCMK__XE_GENERATION_TUPLE)) { for (lpc = 0; cmp == 0 && lpc < PCMK__NELEM(attributes); lpc++) { cmp = compare_int_fields(max_generation_xml, generation, attributes[lpc]); } } else { // Should always be PCMK__XE_GENERATION_TUPLE CRM_LOG_ASSERT(false); } } if (ref == NULL) { ref = "none"; // for logging only } if (lookup_failed_sync_node(join_from, &value) == pcmk_rc_ok) { crm_err("Rejecting join-%d request from node %s because we failed to " "sync its CIB in join-%d " QB_XS " ref=%s", join_id, join_from, value, ref); ack_nack_bool = FALSE; } else if (!pcmk__cluster_is_node_active(join_node)) { if (match_down_event(join_from) != NULL) { /* The join request was received after the node was fenced or * otherwise shutdown in a way that we're aware of. No need to log * an error in this rare occurrence; we know the client was recently * shut down, and receiving a lingering in-flight request is not * cause for alarm. 
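 * (match_down_event() only affects the log severity chosen below; the
 * request is nacked either way.)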
*/ crm_debug("Rejecting join-%d request from inactive node %s " QB_XS " ref=%s", join_id, join_from, ref); } else { crm_err("Rejecting join-%d request from inactive node %s " QB_XS " ref=%s", join_id, join_from, ref); } ack_nack_bool = FALSE; } else if (generation == NULL) { crm_err("Rejecting invalid join-%d request from node %s " "missing CIB generation " QB_XS " ref=%s", join_id, join_from, ref); ack_nack_bool = FALSE; } else if ((join_version == NULL) || !feature_set_compatible(CRM_FEATURE_SET, join_version)) { crm_err("Rejecting join-%d request from node %s because feature set %s" " is incompatible with ours (%s) " QB_XS " ref=%s", join_id, join_from, (join_version? join_version : "pre-3.1.0"), CRM_FEATURE_SET, ref); ack_nack_bool = FALSE; } else if (max_generation_xml == NULL) { const char *validation = pcmk__xe_get(generation, PCMK_XA_VALIDATE_WITH); if (pcmk__get_schema(validation) == NULL) { crm_err("Rejecting join-%d request from %s (with first CIB " "generation) due to %s schema version %s " QB_XS " ref=%s", join_id, join_from, ((validation == NULL)? "missing" : "unknown"), pcmk__s(validation, ""), ref); ack_nack_bool = FALSE; } else { crm_debug("Accepting join-%d request from %s (with first CIB " "generation) " QB_XS " ref=%s", join_id, join_from, ref); max_generation_xml = pcmk__xml_copy(NULL, generation); pcmk__str_update(&max_generation_from, join_from); } } else if ((cmp < 0) || ((cmp == 0) && controld_is_local_node(join_from))) { const char *validation = pcmk__xe_get(generation, PCMK_XA_VALIDATE_WITH); if (pcmk__get_schema(validation) == NULL) { crm_err("Rejecting join-%d request from %s (with better CIB " "generation than current best from %s) due to %s " "schema version %s " QB_XS " ref=%s", join_id, join_from, max_generation_from, ((validation == NULL)? 
"missing" : "unknown"), pcmk__s(validation, ""), ref); ack_nack_bool = FALSE; } else { crm_debug("Accepting join-%d request from %s (with better CIB " "generation than current best from %s) " QB_XS " ref=%s", join_id, join_from, max_generation_from, ref); crm_log_xml_debug(max_generation_xml, "Old max generation"); crm_log_xml_debug(generation, "New max generation"); pcmk__xml_free(max_generation_xml); max_generation_xml = pcmk__xml_copy(NULL, join_ack->xml); pcmk__str_update(&max_generation_from, join_from); } } else { crm_debug("Accepting join-%d request from %s " QB_XS " ref=%s", join_id, join_from, ref); } if (!ack_nack_bool) { crm_update_peer_join(__func__, join_node, controld_join_nack); pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_NACK); } else { crm_update_peer_join(__func__, join_node, controld_join_integrated); pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER); } count = crmd_join_phase_count(controld_join_integrated); crm_debug("%d node%s currently integrated in join-%d", count, pcmk__plural_s(count), join_id); if (check_join_state(cur_state, __func__) == FALSE) { // Don't waste time by invoking the scheduler yet count = crmd_join_phase_count(controld_join_welcomed); crm_debug("Waiting on join-%d requests from %d outstanding node%s", join_id, count, pcmk__plural_s(count)); } } /* A_DC_JOIN_FINALIZE */ void do_dc_join_finalize(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { char *sync_from = NULL; int rc = pcmk_ok; int count_welcomed = crmd_join_phase_count(controld_join_welcomed); int count_finalizable = crmd_join_phase_count(controld_join_integrated) + crmd_join_phase_count(controld_join_nack); /* This we can do straight away and avoid clients timing us out * while we compute the latest CIB */ if (count_welcomed != 0) { crm_debug("Waiting on join-%d requests from %d outstanding node%s " "before finalizing join", current_join_id, count_welcomed, pcmk__plural_s(count_welcomed)); crmd_join_phase_log(LOG_DEBUG); /* crmd_fsa_stall(FALSE); Needed? 
*/ return; } else if (count_finalizable == 0) { crm_debug("Finalization not needed for join-%d at the current time", current_join_id); crmd_join_phase_log(LOG_DEBUG); check_join_state(controld_globals.fsa_state, __func__); return; } controld_clear_fsa_input_flags(R_HAVE_CIB); if ((max_generation_from == NULL) || controld_is_local_node(max_generation_from)) { controld_set_fsa_input_flags(R_HAVE_CIB); } if (!controld_globals.transition_graph->complete) { crm_warn("Delaying join-%d finalization while transition in progress", current_join_id); crmd_join_phase_log(LOG_DEBUG); crmd_fsa_stall(FALSE); return; } if (pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { // Send our CIB out to everyone sync_from = pcmk__str_copy(controld_globals.cluster->priv->node_name); } else { // Ask for the agreed best CIB sync_from = pcmk__str_copy(max_generation_from); } crm_notice("Finalizing join-%d for %d node%s (sync'ing CIB %s.%s.%s " "with schema %s and feature set %s from %s)", current_join_id, count_finalizable, pcmk__plural_s(count_finalizable), pcmk__xe_get(max_generation_xml, PCMK_XA_ADMIN_EPOCH), pcmk__xe_get(max_generation_xml, PCMK_XA_EPOCH), pcmk__xe_get(max_generation_xml, PCMK_XA_NUM_UPDATES), pcmk__xe_get(max_generation_xml, PCMK_XA_VALIDATE_WITH), pcmk__xe_get(max_generation_xml, PCMK_XA_CRM_FEATURE_SET), sync_from); crmd_join_phase_log(LOG_DEBUG); rc = controld_globals.cib_conn->cmds->sync_from(controld_globals.cib_conn, sync_from, NULL, cib_none); fsa_register_cib_callback(rc, sync_from, finalize_sync_callback); } void free_max_generation(void) { free(max_generation_from); max_generation_from = NULL; pcmk__xml_free(max_generation_xml); max_generation_xml = NULL; } void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { CRM_LOG_ASSERT(-EPERM != rc); if (rc != pcmk_ok) { const char *sync_from = (const char *) user_data; do_crm_log(((rc == -pcmk_err_old_data)? 
LOG_WARNING : LOG_ERR), "Could not sync CIB from %s in join-%d: %s", sync_from, current_join_id, pcmk_strerror(rc)); if (rc != -pcmk_err_old_data) { record_failed_sync_node(sync_from, current_join_id); } /* restart the whole join process */ register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION_DC, NULL, NULL, __func__); } else if (!AM_I_DC) { crm_debug("Sync'ed CIB for join-%d but no longer DC", current_join_id); } else if (controld_globals.fsa_state != S_FINALIZE_JOIN) { crm_debug("Sync'ed CIB for join-%d but no longer in S_FINALIZE_JOIN " "(%s)", current_join_id, fsa_state2string(controld_globals.fsa_state)); } else { controld_set_fsa_input_flags(R_HAVE_CIB); /* make sure dc_uuid is re-set to us */ if (!check_join_state(controld_globals.fsa_state, __func__)) { int count_finalizable = 0; count_finalizable = crmd_join_phase_count(controld_join_integrated) + crmd_join_phase_count(controld_join_nack); crm_debug("Notifying %d node%s of join-%d results", count_finalizable, pcmk__plural_s(count_finalizable), current_join_id); g_hash_table_foreach(pcmk__peer_cache, finalize_join_for, NULL); } } } static void join_node_state_commit_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { const char *node = user_data; if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; // for register_fsa_error() macro crm_crit("join-%d node history update (via CIB call %d) for node %s " "failed: %s", current_join_id, call_id, node, pcmk_strerror(rc)); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } crm_debug("join-%d node history update (via CIB call %d) for node %s " "complete", current_join_id, call_id, node); check_join_state(controld_globals.fsa_state, __func__); } /* A_DC_JOIN_PROCESS_ACK */ void do_dc_join_ack(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int join_id = -1; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); const char *op = pcmk__xe_get(join_ack->msg, PCMK__XA_CRM_TASK); - char *join_from = crm_element_value_copy(join_ack->msg, PCMK__XA_SRC); + char *join_from = pcmk__xe_get_copy(join_ack->msg, PCMK__XA_SRC); pcmk__node_status_t *peer = NULL; enum controld_join_phase phase = controld_join_none; enum controld_section_e section = controld_section_lrm; char *xpath = NULL; xmlNode *state = join_ack->xml; xmlNode *execd_state = NULL; cib_t *cib = controld_globals.cib_conn; int rc = pcmk_ok; // Sanity checks if (join_from == NULL) { crm_warn("Ignoring message received without node identification"); goto done; } if (op == NULL) { crm_warn("Ignoring message received from %s without task", join_from); goto done; } if (strcmp(op, CRM_OP_JOIN_CONFIRM)) { crm_debug("Ignoring '%s' message from %s while waiting for '%s'", op, join_from, CRM_OP_JOIN_CONFIRM); goto done; } if (pcmk__xe_get_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id) != pcmk_rc_ok) { crm_warn("Ignoring join confirmation from %s without valid join ID", join_from); goto done; } peer = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster_member); phase = controld_get_join_phase(peer); if (phase != controld_join_finalized) { crm_info("Ignoring out-of-sequence join-%d confirmation from %s " "(currently %s not %s)", join_id, join_from, join_phase_text(phase), join_phase_text(controld_join_finalized)); goto done; } if (join_id != current_join_id) { crm_err("Rejecting join-%d confirmation from %s " "because currently on join-%d", join_id, join_from, current_join_id); 
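/* A confirmation for a stale join round is not just ignored: the peer is
 * demoted to the nack phase rather than left as finalized
 */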
crm_update_peer_join(__func__, peer, controld_join_nack); goto done; } crm_update_peer_join(__func__, peer, controld_join_confirmed); /* Update CIB with node's current executor state. A new transition will be * triggered later, when the CIB manager notifies us of the change. * * The delete and modify requests are part of an atomic transaction. */ rc = cib->cmds->init_transaction(cib); if (rc != pcmk_ok) { goto done; } // Delete relevant parts of node's current executor state from CIB if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) { section = controld_section_lrm_unlocked; } controld_node_state_deletion_strings(join_from, section, &xpath, NULL); rc = cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_multiple|cib_transaction); if (rc != pcmk_ok) { goto done; } // Update CIB with node's latest known executor state if (controld_is_local_node(join_from)) { // Use the latest possible state if processing our own join ack execd_state = controld_query_executor_state(); if (execd_state != NULL) { crm_debug("Updating local node history for join-%d from query " "result", current_join_id); state = execd_state; } else { crm_warn("Updating local node history from join-%d confirmation " "because query failed", current_join_id); } } else { crm_debug("Updating node history for %s from join-%d confirmation", join_from, current_join_id); } rc = cib->cmds->modify(cib, PCMK_XE_STATUS, state, cib_can_create|cib_transaction); pcmk__xml_free(execd_state); if (rc != pcmk_ok) { goto done; } // Commit the transaction rc = cib->cmds->end_transaction(cib, true, cib_none); fsa_register_cib_callback(rc, join_from, join_node_state_commit_callback); if (rc > 0) { // join_from will be freed after callback join_from = NULL; rc = pcmk_ok; } done: if (rc != pcmk_ok) { crm_crit("join-%d node history update for node %s failed: %s", current_join_id, join_from, pcmk_strerror(rc)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } free(join_from); free(xpath); } void finalize_join_for(gpointer key, gpointer value, gpointer user_data) { xmlNode *acknak = NULL; xmlNode *tmp1 = NULL; pcmk__node_status_t *join_node = value; const char *join_to = join_node->name; enum controld_join_phase phase = controld_get_join_phase(join_node); bool integrated = false; switch (phase) { case controld_join_integrated: integrated = true; break; case controld_join_nack: break; default: crm_trace("Not updating non-integrated and non-nacked node %s (%s) " "for join-%d", join_to, join_phase_text(phase), current_join_id); return; } /* Update the element with the node's name and UUID, in case they * weren't known before */ crm_trace("Updating node name and UUID in CIB for %s", join_to); tmp1 = pcmk__xe_create(NULL, PCMK_XE_NODE); crm_xml_add(tmp1, PCMK_XA_ID, pcmk__cluster_get_xml_id(join_node)); crm_xml_add(tmp1, PCMK_XA_UNAME, join_to); fsa_cib_anon_update(PCMK_XE_NODES, tmp1); pcmk__xml_free(tmp1); join_node = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster_member); if (!pcmk__cluster_is_node_active(join_node)) { /* * NACK'ing nodes that the membership layer doesn't know about yet * simply creates more churn * * Better to leave them waiting and let the join restart when * the new membership event comes in * * All other NACKs (due to versions etc) should still be processed */ pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_PENDING); return; } // Acknowledge or nack node's join request crm_debug("%sing join-%d request from %s", integrated? 
"Acknowledg" : "Nack", current_join_id, join_to); acknak = create_dc_message(CRM_OP_JOIN_ACKNAK, join_to); pcmk__xe_set_bool_attr(acknak, CRM_OP_JOIN_ACKNAK, integrated); if (integrated) { // No change needed for a nacked node crm_update_peer_join(__func__, join_node, controld_join_finalized); pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER); /* Iterate through the remote peer cache and add information on which * node hosts each to the ACK message. This keeps new controllers in * sync with what has already happened. */ if (pcmk__cluster_num_remote_nodes() > 0) { GHashTableIter iter; pcmk__node_status_t *node = NULL; xmlNode *remotes = pcmk__xe_create(acknak, PCMK_XE_NODES); g_hash_table_iter_init(&iter, pcmk__remote_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { xmlNode *remote = NULL; if (!node->conn_host) { continue; } remote = pcmk__xe_create(remotes, PCMK_XE_NODE); pcmk__xe_set_props(remote, PCMK_XA_ID, node->name, PCMK__XA_NODE_STATE, node->state, PCMK__XA_CONNECTION_HOST, node->conn_host, NULL); } } } pcmk__cluster_send_message(join_node, pcmk_ipc_controld, acknak); pcmk__xml_free(acknak); return; } gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source) { static unsigned long long highest_seq = 0; if (controld_globals.membership_id != controld_globals.peer_seq) { crm_debug("join-%d: Membership changed from %llu to %llu " QB_XS " highest=%llu state=%s for=%s", current_join_id, controld_globals.membership_id, controld_globals.peer_seq, highest_seq, fsa_state2string(cur_state), source); if (highest_seq < controld_globals.peer_seq) { /* Don't spam the FSA with duplicates */ highest_seq = controld_globals.peer_seq; register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL); } } else if (cur_state == S_INTEGRATION) { if (crmd_join_phase_count(controld_join_welcomed) == 0) { int count = crmd_join_phase_count(controld_join_integrated); crm_debug("join-%d: Integration of %d peer%s complete " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); register_fsa_input_before(C_FSA_INTERNAL, I_INTEGRATED, NULL); return TRUE; } } else if (cur_state == S_FINALIZE_JOIN) { if (!pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { crm_debug("join-%d: Delaying finalization until we have CIB " QB_XS " state=%s for=%s", current_join_id, fsa_state2string(cur_state), source); return TRUE; } else if (crmd_join_phase_count(controld_join_welcomed) != 0) { int count = crmd_join_phase_count(controld_join_welcomed); crm_debug("join-%d: Still waiting on %d welcomed node%s " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); crmd_join_phase_log(LOG_DEBUG); } else if (crmd_join_phase_count(controld_join_integrated) != 0) { int count = crmd_join_phase_count(controld_join_integrated); crm_debug("join-%d: Still waiting on %d integrated node%s " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); crmd_join_phase_log(LOG_DEBUG); } else if (crmd_join_phase_count(controld_join_finalized) != 0) { int count = crmd_join_phase_count(controld_join_finalized); crm_debug("join-%d: Still waiting on %d finalized node%s " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); crmd_join_phase_log(LOG_DEBUG); } else { crm_debug("join-%d: Complete " QB_XS " state=%s for=%s", current_join_id, fsa_state2string(cur_state), 
source); register_fsa_input_later(C_FSA_INTERNAL, I_FINALIZED, NULL); return TRUE; } } return FALSE; } void do_dc_join_final(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_debug("Ensuring DC, quorum and node attributes are up-to-date"); crm_update_quorum(pcmk__cluster_has_quorum(), TRUE); } int crmd_join_phase_count(enum controld_join_phase phase) { int count = 0; pcmk__node_status_t *peer; GHashTableIter iter; g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { if (controld_get_join_phase(peer) == phase) { count++; } } return count; } void crmd_join_phase_log(int level) { pcmk__node_status_t *peer; GHashTableIter iter; g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { do_crm_log(level, "join-%d: %s=%s", current_join_id, peer->name, join_phase_text(controld_get_join_phase(peer))); } } diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c index 93bbcb4d6b..b96ce55ba8 100644 --- a/daemons/execd/execd_commands.c +++ b/daemons/execd/execd_commands.c @@ -1,2002 +1,2002 @@ /* * Copyright 2012-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include // xmlNode // Check whether we have a high-resolution monotonic clock #undef PCMK__TIME_USE_CGT #if HAVE_DECL_CLOCK_MONOTONIC && defined(CLOCK_MONOTONIC) # define PCMK__TIME_USE_CGT # include /* clock_gettime */ #endif #include #include #include #include #include #include #include #include #include #include "pacemaker-execd.h" GHashTable *rsc_list = NULL; typedef struct lrmd_cmd_s { int timeout; guint interval_ms; int start_delay; int timeout_orig; int call_id; int call_opts; /* Timer ids, must be removed on cmd destruction. */ int delay_id; int stonith_recurring_id; int rsc_deleted; int service_flags; char *client_id; char *origin; char *rsc_id; char *action; char *real_action; char *userdata_str; pcmk__action_result_t result; /* We can track operation queue time and run time, to be saved with the CIB * resource history (and displayed in cluster status). We need * high-resolution monotonic time for this purpose, so we use * clock_gettime(CLOCK_MONOTONIC, ...) (if available, otherwise this feature * is disabled). * * However, we also need epoch timestamps for recording the time the command * last ran and the time its return value last changed, for use in time * displays (as opposed to interval calculations). We keep time_t values for * this purpose. * * The last run time is used for both purposes, so we keep redundant * monotonic and epoch values for this. Technically the two could represent * different times, but since time_t has only second resolution and the * values are used for distinct purposes, that is not significant. */ #ifdef PCMK__TIME_USE_CGT /* Recurring and systemd operations may involve more than one executor * command per operation, so they need info about the original and the most * recent. 
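 *
 * For example (hypothetical timings): a recurring monitor queued and run at
 * t=0s and re-run at t=10s and t=20s keeps t_first_queue/t_first_run at the
 * t=0s values while t_queue/t_run track the latest iteration; only the
 * epoch_* members below hold wall-clock times meant for display.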
struct timespec t_first_run; // When op first ran struct timespec t_run; // When op most recently ran struct timespec t_first_queue; // When op was first queued struct timespec t_queue; // When op was most recently queued #endif time_t epoch_last_run; // Epoch timestamp of when op last ran time_t epoch_rcchange; // Epoch timestamp of when rc last changed bool first_notify_sent; int last_notify_rc; int last_notify_op_status; int last_pid; GHashTable *params; } lrmd_cmd_t; static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc); static gboolean execute_resource_action(gpointer user_data); static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id); #ifdef PCMK__TIME_USE_CGT /*! * \internal * \brief Check whether a struct timespec has been set * * \param[in] timespec Time to check * * \return true if timespec has been set (i.e. is nonzero), false otherwise */ static inline bool time_is_set(const struct timespec *timespec) { return (timespec != NULL) && ((timespec->tv_sec != 0) || (timespec->tv_nsec != 0)); } /*! * \internal * \brief Set a timespec (and its original if unset) to the current time * * \param[out] t_current Where to store current time * \param[out] t_orig Where to copy t_current if unset */ static void get_current_time(struct timespec *t_current, struct timespec *t_orig) { clock_gettime(CLOCK_MONOTONIC, t_current); if ((t_orig != NULL) && !time_is_set(t_orig)) { *t_orig = *t_current; } } /*! * \internal * \brief Return difference between two times in milliseconds * * \param[in] now More recent time (or NULL to use current time) * \param[in] old Earlier time * * \return milliseconds difference (or 0 if old is NULL or unset) * * \note Can overflow on 32bit machines when the difference is around * 24 days or more. */ static int time_diff_ms(const struct timespec *now, const struct timespec *old) { int diff_ms = 0; if (time_is_set(old)) { struct timespec local_now = { 0, }; if (now == NULL) { clock_gettime(CLOCK_MONOTONIC, &local_now); now = &local_now; } diff_ms = (now->tv_sec - old->tv_sec) * 1000 + (now->tv_nsec - old->tv_nsec) / 1000000; } return diff_ms; } /*! * \internal * \brief Reset a command's operation times to their original values. * * Reset a command's run and queued timestamps to the timestamps of the original * command, so we report the entire time since then and not just the time since * the most recent command (for recurring and systemd operations). * * \param[in,out] cmd Executor command object to reset * * \note It's not obvious what the queued time should be for a systemd * start/stop operation, which might go like this: * initial command queued 5ms, runs 3s * monitor command queued 10ms, runs 10s * monitor command queued 10ms, runs 10s * Is the queued time for that operation 5ms, 10ms or 25ms? The current * implementation will report 5ms. If it's 25ms, then we need to * subtract 20ms from the total exec time so as not to count it twice. * We can implement that later if it matters to anyone ... */ static void cmd_original_times(lrmd_cmd_t * cmd) { cmd->t_run = cmd->t_first_run; cmd->t_queue = cmd->t_first_queue; } #endif static inline bool action_matches(const lrmd_cmd_t *cmd, const char *action, guint interval_ms) { return (cmd->interval_ms == interval_ms) && pcmk__str_eq(cmd->action, action, pcmk__str_casei); } /*!
* \internal * \brief Log the result of an asynchronous command * * \param[in] cmd Command to log result for * \param[in] exec_time_ms Execution time in milliseconds, if known * \param[in] queue_time_ms Queue time in milliseconds, if known */ static void log_finished(const lrmd_cmd_t *cmd, int exec_time_ms, int queue_time_ms) { int log_level = LOG_INFO; GString *str = g_string_sized_new(100); // reasonable starting size if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { log_level = LOG_DEBUG; } g_string_append_printf(str, "%s %s (call %d", cmd->rsc_id, cmd->action, cmd->call_id); if (cmd->last_pid != 0) { g_string_append_printf(str, ", PID %d", cmd->last_pid); } switch (cmd->result.execution_status) { case PCMK_EXEC_DONE: g_string_append_printf(str, ") exited with status %d", cmd->result.exit_status); break; case PCMK_EXEC_CANCELLED: g_string_append_printf(str, ") cancelled"); break; default: pcmk__g_strcat(str, ") could not be executed: ", pcmk_exec_status_str(cmd->result.execution_status), NULL); break; } if (cmd->result.exit_reason != NULL) { pcmk__g_strcat(str, " (", cmd->result.exit_reason, ")", NULL); } #ifdef PCMK__TIME_USE_CGT pcmk__g_strcat(str, " (execution time ", pcmk__readable_interval(exec_time_ms), NULL); if (queue_time_ms > 0) { pcmk__g_strcat(str, " after being queued ", pcmk__readable_interval(queue_time_ms), NULL); } g_string_append_c(str, ')'); #endif do_crm_log(log_level, "%s", str->str); g_string_free(str, TRUE); } static void log_execute(lrmd_cmd_t * cmd) { int log_level = LOG_INFO; if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { log_level = LOG_DEBUG; } do_crm_log(log_level, "executing - rsc:%s action:%s call_id:%d", cmd->rsc_id, cmd->action, cmd->call_id); } static const char * normalize_action_name(lrmd_rsc_t * rsc, const char *action) { if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_casei) && pcmk_is_set(pcmk_get_ra_caps(rsc->class), pcmk_ra_cap_status)) { return PCMK_ACTION_STATUS; } return action; } static lrmd_rsc_t * build_rsc_from_xml(xmlNode * msg) { xmlNode *rsc_xml = pcmk__xpath_find_one(msg->doc, "//" PCMK__XE_LRMD_RSC, LOG_ERR); lrmd_rsc_t *rsc = NULL; rsc = pcmk__assert_alloc(1, sizeof(lrmd_rsc_t)); pcmk__xe_get_int(msg, PCMK__XA_LRMD_CALLOPT, &rsc->call_opts); - rsc->rsc_id = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_RSC_ID); - rsc->class = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_CLASS); - rsc->provider = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_PROVIDER); - rsc->type = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_TYPE); + rsc->rsc_id = pcmk__xe_get_copy(rsc_xml, PCMK__XA_LRMD_RSC_ID); + rsc->class = pcmk__xe_get_copy(rsc_xml, PCMK__XA_LRMD_CLASS); + rsc->provider = pcmk__xe_get_copy(rsc_xml, PCMK__XA_LRMD_PROVIDER); + rsc->type = pcmk__xe_get_copy(rsc_xml, PCMK__XA_LRMD_TYPE); rsc->work = mainloop_add_trigger(G_PRIORITY_HIGH, execute_resource_action, rsc); // Initialize fence device probes (to return "not running") pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); return rsc; } static lrmd_cmd_t * create_lrmd_cmd(xmlNode *msg, pcmk__client_t *client) { int call_options = 0; xmlNode *rsc_xml = pcmk__xpath_find_one(msg->doc, "//" PCMK__XE_LRMD_RSC, LOG_ERR); lrmd_cmd_t *cmd = NULL; cmd = pcmk__assert_alloc(1, sizeof(lrmd_cmd_t)); pcmk__xe_get_int(msg, PCMK__XA_LRMD_CALLOPT, &call_options); cmd->call_opts = call_options; cmd->client_id = pcmk__str_copy(client->id); pcmk__xe_get_int(msg, PCMK__XA_LRMD_CALLID, &cmd->call_id); 
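/* The remaining per-command fields come from the PCMK__XE_LRMD_RSC child
 * element: numeric attributes are parsed in place, while string attributes
 * are duplicated and later released in free_lrmd_cmd()
 */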
pcmk__xe_get_guint(rsc_xml, PCMK__XA_LRMD_RSC_INTERVAL, &cmd->interval_ms); pcmk__xe_get_int(rsc_xml, PCMK__XA_LRMD_TIMEOUT, &cmd->timeout); pcmk__xe_get_int(rsc_xml, PCMK__XA_LRMD_RSC_START_DELAY, &cmd->start_delay); cmd->timeout_orig = cmd->timeout; - cmd->origin = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_ORIGIN); - cmd->action = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_RSC_ACTION); - cmd->userdata_str = crm_element_value_copy(rsc_xml, - PCMK__XA_LRMD_RSC_USERDATA_STR); - cmd->rsc_id = crm_element_value_copy(rsc_xml, PCMK__XA_LRMD_RSC_ID); + cmd->origin = pcmk__xe_get_copy(rsc_xml, PCMK__XA_LRMD_ORIGIN); + cmd->action = pcmk__xe_get_copy(rsc_xml, PCMK__XA_LRMD_RSC_ACTION); + cmd->userdata_str = pcmk__xe_get_copy(rsc_xml, + PCMK__XA_LRMD_RSC_USERDATA_STR); + cmd->rsc_id = pcmk__xe_get_copy(rsc_xml, PCMK__XA_LRMD_RSC_ID); cmd->params = xml2list(rsc_xml); if (pcmk__str_eq(g_hash_table_lookup(cmd->params, "CRM_meta_on_fail"), PCMK_VALUE_BLOCK, pcmk__str_casei)) { crm_debug("Setting flag to leave pid group on timeout and " "only kill action pid for " PCMK__OP_FMT, cmd->rsc_id, cmd->action, cmd->interval_ms); cmd->service_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Action", cmd->action, 0, SVC_ACTION_LEAVE_GROUP, "SVC_ACTION_LEAVE_GROUP"); } return cmd; } static void stop_recurring_timer(lrmd_cmd_t *cmd) { if (cmd) { if (cmd->stonith_recurring_id) { g_source_remove(cmd->stonith_recurring_id); } cmd->stonith_recurring_id = 0; } } static void free_lrmd_cmd(lrmd_cmd_t * cmd) { stop_recurring_timer(cmd); if (cmd->delay_id) { g_source_remove(cmd->delay_id); } if (cmd->params) { g_hash_table_destroy(cmd->params); } pcmk__reset_result(&(cmd->result)); free(cmd->origin); free(cmd->action); free(cmd->real_action); free(cmd->userdata_str); free(cmd->rsc_id); free(cmd->client_id); free(cmd); } static gboolean stonith_recurring_op_helper(gpointer data) { lrmd_cmd_t *cmd = data; lrmd_rsc_t *rsc; cmd->stonith_recurring_id = 0; if (!cmd->rsc_id) { return FALSE; } rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); pcmk__assert(rsc != NULL); /* take it out of recurring_ops list, and put it in the pending ops * to be executed */ rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = g_list_append(rsc->pending_ops, cmd); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_queue), &(cmd->t_first_queue)); #endif mainloop_set_trigger(rsc->work); return FALSE; } static inline void start_recurring_timer(lrmd_cmd_t *cmd) { if (!cmd || (cmd->interval_ms <= 0)) { return; } cmd->stonith_recurring_id = pcmk__create_timer(cmd->interval_ms, stonith_recurring_op_helper, cmd); } static gboolean start_delay_helper(gpointer data) { lrmd_cmd_t *cmd = data; lrmd_rsc_t *rsc = NULL; cmd->delay_id = 0; rsc = cmd->rsc_id ? g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; if (rsc) { mainloop_set_trigger(rsc->work); } return FALSE; } /*! 
* \internal * \brief Check whether a list already contains the equivalent of a given action * * \param[in] action_list List to search * \param[in] cmd Action to search for */ static lrmd_cmd_t * find_duplicate_action(const GList *action_list, const lrmd_cmd_t *cmd) { for (const GList *item = action_list; item != NULL; item = item->next) { lrmd_cmd_t *dup = item->data; if (action_matches(cmd, dup->action, dup->interval_ms)) { return dup; } } return NULL; } static bool merge_recurring_duplicate(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { lrmd_cmd_t * dup = NULL; bool dup_pending = true; if (cmd->interval_ms == 0) { return false; } // Search for a duplicate of this action (in-flight or not) dup = find_duplicate_action(rsc->pending_ops, cmd); if (dup == NULL) { dup_pending = false; dup = find_duplicate_action(rsc->recurring_ops, cmd); if (dup == NULL) { return false; } } /* Do not merge fencing monitors marked for cancellation, so we can reply to * the cancellation separately. */ if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei) && (dup->result.execution_status == PCMK_EXEC_CANCELLED)) { return false; } /* This should not occur. If it does, we need to investigate how something * like this is possible in the controller. */ crm_warn("Duplicate recurring op entry detected (" PCMK__OP_FMT "), merging with previous op entry", rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval_ms); // Merge new action's call ID and user data into existing action dup->first_notify_sent = false; free(dup->userdata_str); dup->userdata_str = cmd->userdata_str; cmd->userdata_str = NULL; dup->call_id = cmd->call_id; free_lrmd_cmd(cmd); cmd = NULL; /* If dup is not pending, that means it has already executed at least once * and is waiting in the interval. In that case, stop waiting and initiate * a new instance now. */ if (!dup_pending) { if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { stop_recurring_timer(dup); stonith_recurring_op_helper(dup); } else { services_action_kick(rsc->rsc_id, normalize_action_name(rsc, dup->action), dup->interval_ms); } } return true; } static void schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd) { CRM_CHECK(cmd != NULL, return); CRM_CHECK(rsc != NULL, return); crm_trace("Scheduling %s on %s", cmd->action, rsc->rsc_id); if (merge_recurring_duplicate(rsc, cmd)) { // Equivalent of cmd has already been scheduled return; } /* The controller expects the executor to automatically cancel * recurring operations before a resource stops. 
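 *
 * For example (hypothetical): when a stop is scheduled for a resource that
 * still has a 10s-interval monitor, cancel_all_recurring() below drops the
 * monitor before the stop itself is appended to pending_ops.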
*/ if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { cancel_all_recurring(rsc, NULL); } rsc->pending_ops = g_list_append(rsc->pending_ops, cmd); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_queue), &(cmd->t_first_queue)); #endif mainloop_set_trigger(rsc->work); if (cmd->start_delay) { cmd->delay_id = pcmk__create_timer(cmd->start_delay, start_delay_helper, cmd); } } static xmlNode * create_lrmd_reply(const char *origin, int rc, int call_id) { xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_LRMD_REPLY); crm_xml_add(reply, PCMK__XA_LRMD_ORIGIN, origin); crm_xml_add_int(reply, PCMK__XA_LRMD_RC, rc); crm_xml_add_int(reply, PCMK__XA_LRMD_CALLID, call_id); return reply; } static void send_client_notify(gpointer key, gpointer value, gpointer user_data) { xmlNode *update_msg = user_data; pcmk__client_t *client = value; int rc; int log_level = LOG_WARNING; const char *msg = NULL; CRM_CHECK(client != NULL, return); if (client->name == NULL) { crm_trace("Skipping notification to client without name"); return; } if (pcmk_is_set(client->flags, pcmk__client_to_proxy)) { /* We only want to notify clients of the executor IPC API. If we are * running as Pacemaker Remote, we may have clients proxied to other * IPC services in the cluster, so skip those. */ crm_trace("Skipping executor API notification to client %s", pcmk__client_name(client)); return; } rc = lrmd_server_send_notify(client, update_msg); if (rc == pcmk_rc_ok) { return; } switch (rc) { case ENOTCONN: case EPIPE: // Client exited without waiting for notification log_level = LOG_INFO; msg = "Disconnected"; break; default: msg = pcmk_rc_str(rc); break; } do_crm_log(log_level, "Could not notify client %s: %s " QB_XS " rc=%d", pcmk__client_name(client), msg, rc); } static void send_cmd_complete_notify(lrmd_cmd_t * cmd) { xmlNode *notify = NULL; int exec_time = 0; int queue_time = 0; #ifdef PCMK__TIME_USE_CGT exec_time = time_diff_ms(NULL, &(cmd->t_run)); queue_time = time_diff_ms(&cmd->t_run, &(cmd->t_queue)); #endif log_finished(cmd, exec_time, queue_time); /* If the originator requested to be notified only for changes in recurring * operation results, skip the notification if the result hasn't changed. 
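 *
 * For example (hypothetical results): a recurring monitor that keeps
 * returning the same rc/op-status pair produces one notification for its
 * first result; a later change (say, to "not running") produces another,
 * because it no longer matches what was last sent.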
*/ if (cmd->first_notify_sent && pcmk_is_set(cmd->call_opts, lrmd_opt_notify_changes_only) && (cmd->last_notify_rc == cmd->result.exit_status) && (cmd->last_notify_op_status == cmd->result.execution_status)) { return; } cmd->first_notify_sent = true; cmd->last_notify_rc = cmd->result.exit_status; cmd->last_notify_op_status = cmd->result.execution_status; notify = pcmk__xe_create(NULL, PCMK__XE_LRMD_NOTIFY); crm_xml_add(notify, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add_int(notify, PCMK__XA_LRMD_TIMEOUT, cmd->timeout); crm_xml_add_ms(notify, PCMK__XA_LRMD_RSC_INTERVAL, cmd->interval_ms); crm_xml_add_int(notify, PCMK__XA_LRMD_RSC_START_DELAY, cmd->start_delay); crm_xml_add_int(notify, PCMK__XA_LRMD_EXEC_RC, cmd->result.exit_status); crm_xml_add_int(notify, PCMK__XA_LRMD_EXEC_OP_STATUS, cmd->result.execution_status); crm_xml_add_int(notify, PCMK__XA_LRMD_CALLID, cmd->call_id); crm_xml_add_int(notify, PCMK__XA_LRMD_RSC_DELETED, cmd->rsc_deleted); crm_xml_add_ll(notify, PCMK__XA_LRMD_RUN_TIME, (long long) cmd->epoch_last_run); crm_xml_add_ll(notify, PCMK__XA_LRMD_RCCHANGE_TIME, (long long) cmd->epoch_rcchange); #ifdef PCMK__TIME_USE_CGT crm_xml_add_int(notify, PCMK__XA_LRMD_EXEC_TIME, exec_time); crm_xml_add_int(notify, PCMK__XA_LRMD_QUEUE_TIME, queue_time); #endif crm_xml_add(notify, PCMK__XA_LRMD_OP, LRMD_OP_RSC_EXEC); crm_xml_add(notify, PCMK__XA_LRMD_RSC_ID, cmd->rsc_id); if(cmd->real_action) { crm_xml_add(notify, PCMK__XA_LRMD_RSC_ACTION, cmd->real_action); } else { crm_xml_add(notify, PCMK__XA_LRMD_RSC_ACTION, cmd->action); } crm_xml_add(notify, PCMK__XA_LRMD_RSC_USERDATA_STR, cmd->userdata_str); crm_xml_add(notify, PCMK__XA_LRMD_RSC_EXIT_REASON, cmd->result.exit_reason); if (cmd->result.action_stderr != NULL) { crm_xml_add(notify, PCMK__XA_LRMD_RSC_OUTPUT, cmd->result.action_stderr); } else if (cmd->result.action_stdout != NULL) { crm_xml_add(notify, PCMK__XA_LRMD_RSC_OUTPUT, cmd->result.action_stdout); } if (cmd->params) { char *key = NULL; char *value = NULL; GHashTableIter iter; xmlNode *args = pcmk__xe_create(notify, PCMK__XE_ATTRIBUTES); g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { hash2smartfield((gpointer) key, (gpointer) value, args); } } if ((cmd->client_id != NULL) && pcmk_is_set(cmd->call_opts, lrmd_opt_notify_orig_only)) { pcmk__client_t *client = pcmk__find_client_by_id(cmd->client_id); if (client != NULL) { send_client_notify(client->id, client, notify); } } else { pcmk__foreach_ipc_client(send_client_notify, notify); } pcmk__xml_free(notify); } static void send_generic_notify(int rc, xmlNode * request) { if (pcmk__ipc_client_count() != 0) { int call_id = 0; xmlNode *notify = NULL; xmlNode *rsc_xml = pcmk__xpath_find_one(request->doc, "//" PCMK__XE_LRMD_RSC, LOG_ERR); const char *rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); const char *op = pcmk__xe_get(request, PCMK__XA_LRMD_OP); pcmk__xe_get_int(request, PCMK__XA_LRMD_CALLID, &call_id); notify = pcmk__xe_create(NULL, PCMK__XE_LRMD_NOTIFY); crm_xml_add(notify, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add_int(notify, PCMK__XA_LRMD_RC, rc); crm_xml_add_int(notify, PCMK__XA_LRMD_CALLID, call_id); crm_xml_add(notify, PCMK__XA_LRMD_OP, op); crm_xml_add(notify, PCMK__XA_LRMD_RSC_ID, rsc_id); pcmk__foreach_ipc_client(send_client_notify, notify); pcmk__xml_free(notify); } } static void cmd_reset(lrmd_cmd_t * cmd) { cmd->last_pid = 0; #ifdef PCMK__TIME_USE_CGT memset(&cmd->t_run, 0, sizeof(cmd->t_run)); memset(&cmd->t_queue, 0, 
sizeof(cmd->t_queue)); #endif cmd->epoch_last_run = 0; pcmk__reset_result(&(cmd->result)); cmd->result.execution_status = PCMK_EXEC_DONE; } static void cmd_finalize(lrmd_cmd_t * cmd, lrmd_rsc_t * rsc) { crm_trace("Resource operation rsc:%s action:%s completed (%p %p)", cmd->rsc_id, cmd->action, rsc ? rsc->active : NULL, cmd); if (rsc && (rsc->active == cmd)) { rsc->active = NULL; mainloop_set_trigger(rsc->work); } if (!rsc) { cmd->rsc_deleted = 1; } /* reset original timeout so client notification has correct information */ cmd->timeout = cmd->timeout_orig; send_cmd_complete_notify(cmd); if ((cmd->interval_ms != 0) && (cmd->result.execution_status == PCMK_EXEC_CANCELLED)) { if (rsc) { rsc->recurring_ops = g_list_remove(rsc->recurring_ops, cmd); rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else if (cmd->interval_ms == 0) { if (rsc) { rsc->pending_ops = g_list_remove(rsc->pending_ops, cmd); } free_lrmd_cmd(cmd); } else { /* Clear all the values pertaining just to the last iteration of a recurring op. */ cmd_reset(cmd); } } struct notify_new_client_data { xmlNode *notify; pcmk__client_t *new_client; }; static void notify_one_client(gpointer key, gpointer value, gpointer user_data) { pcmk__client_t *client = value; struct notify_new_client_data *data = user_data; if (!pcmk__str_eq(client->id, data->new_client->id, pcmk__str_casei)) { send_client_notify(key, (gpointer) client, (gpointer) data->notify); } } void notify_of_new_client(pcmk__client_t *new_client) { struct notify_new_client_data data; data.new_client = new_client; data.notify = pcmk__xe_create(NULL, PCMK__XE_LRMD_NOTIFY); crm_xml_add(data.notify, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data.notify, PCMK__XA_LRMD_OP, LRMD_OP_NEW_CLIENT); pcmk__foreach_ipc_client(notify_one_client, &data); pcmk__xml_free(data.notify); } void client_disconnect_cleanup(const char *client_id) { GHashTableIter iter; lrmd_rsc_t *rsc = NULL; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & rsc)) { if (pcmk_all_flags_set(rsc->call_opts, lrmd_opt_drop_recurring)) { /* This client is disconnecting, drop any recurring operations * it may have initiated on the resource */ cancel_all_recurring(rsc, client_id); } } } static void action_complete(svc_action_t * action) { lrmd_rsc_t *rsc; lrmd_cmd_t *cmd = action->cb_data; enum ocf_exitcode code; #ifdef PCMK__TIME_USE_CGT const char *rclass = NULL; bool goagain = false; int time_sum = 0; int timeout_left = 0; int delay = 0; #endif if (!cmd) { crm_err("Completed executor action (%s) does not match any known operations", action->id); return; } #ifdef PCMK__TIME_USE_CGT if (cmd->result.exit_status != action->rc) { cmd->epoch_rcchange = time(NULL); } #endif cmd->last_pid = action->pid; // Cast variable instead of function return to keep compilers happy code = services_result2ocf(action->standard, cmd->action, action->rc); pcmk__set_result(&(cmd->result), (int) code, action->status, services__exit_reason(action)); rsc = cmd->rsc_id ? 
g_hash_table_lookup(rsc_list, cmd->rsc_id) : NULL; #ifdef PCMK__TIME_USE_CGT if (rsc != NULL) { rclass = rsc->class; #if PCMK__ENABLE_SERVICE if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) { rclass = resources_find_service_class(rsc->type); } #endif } if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) { goto finalize; } if (pcmk__result_ok(&(cmd->result)) && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START, PCMK_ACTION_STOP, NULL)) { /* Getting results for when a start or stop action completes is now * handled by watching for JobRemoved() signals from systemd and * reacting to them. So, we can bypass the rest of the code in this * function for those actions, and simply finalize cmd. * * @TODO When monitors are handled in the same way, this function * can either be drastically simplified or done away with entirely. */ services__copy_result(action, &(cmd->result)); goto finalize; } else if (cmd->result.execution_status == PCMK_EXEC_PENDING && pcmk__str_any_of(cmd->action, PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS, NULL) && cmd->interval_ms == 0 && cmd->real_action == NULL) { /* If the state is Pending at the time of probe, execute follow-up monitor. */ goagain = true; cmd->real_action = cmd->action; cmd->action = pcmk__str_copy(PCMK_ACTION_MONITOR); } else if (cmd->real_action != NULL) { // This is follow-up monitor to check whether start/stop/probe(monitor) completed if (cmd->result.execution_status == PCMK_EXEC_PENDING) { goagain = true; } else if (pcmk__result_ok(&(cmd->result)) && pcmk__str_eq(cmd->real_action, PCMK_ACTION_STOP, pcmk__str_casei)) { goagain = true; } else { int time_sum = time_diff_ms(NULL, &(cmd->t_first_run)); int timeout_left = cmd->timeout_orig - time_sum; crm_debug("%s systemd %s is now complete (elapsed=%dms, " "remaining=%dms): %s (%d)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, crm_exit_str(cmd->result.exit_status), cmd->result.exit_status); cmd_original_times(cmd); // Monitors may return "not running", but start/stop shouldn't if ((cmd->result.execution_status == PCMK_EXEC_DONE) && (cmd->result.exit_status == PCMK_OCF_NOT_RUNNING)) { if (pcmk__str_eq(cmd->real_action, PCMK_ACTION_START, pcmk__str_casei)) { cmd->result.exit_status = PCMK_OCF_UNKNOWN_ERROR; } else if (pcmk__str_eq(cmd->real_action, PCMK_ACTION_STOP, pcmk__str_casei)) { cmd->result.exit_status = PCMK_OCF_OK; } } } } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS, NULL) && (cmd->interval_ms > 0)) { /* For monitors, excluding follow-up monitors, */ /* if the pending state persists from the first notification until its timeout, */ /* it will be treated as a timeout. 
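 * The check below compares the current time against epoch_rcchange (the
 * time the return code last changed, logged below as the first pending
 * notification) plus timeout_orig converted from milliseconds to seconds.
 * Illustrative numbers only (not taken from any configuration): with
 * timeout_orig = 30000 ms, a monitor that is still pending 30 s after
 * epoch_rcchange is given up on and reported as PCMK_OCF_UNKNOWN_ERROR
 * with execution status PCMK_EXEC_TIMEOUT.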
*/ if ((cmd->result.execution_status == PCMK_EXEC_PENDING) && (cmd->last_notify_op_status == PCMK_EXEC_PENDING)) { int time_left = time(NULL) - (cmd->epoch_rcchange + (cmd->timeout_orig/1000)); if (time_left >= 0) { crm_notice("Giving up on %s %s (rc=%d): monitor pending timeout " "(first pending notification=%s timeout=%ds)", cmd->rsc_id, cmd->action, cmd->result.exit_status, pcmk__trim(ctime(&cmd->epoch_rcchange)), cmd->timeout_orig); pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "Investigate reason for timeout, and adjust " "configured operation timeout if necessary"); cmd_original_times(cmd); } } } if (!goagain) { goto finalize; } time_sum = time_diff_ms(NULL, &(cmd->t_first_run)); timeout_left = cmd->timeout_orig - time_sum; delay = cmd->timeout_orig / 10; if (delay >= timeout_left && timeout_left > 20) { delay = timeout_left/2; } delay = QB_MIN(2000, delay); if (delay < timeout_left) { cmd->start_delay = delay; cmd->timeout = timeout_left; if (pcmk__result_ok(&(cmd->result))) { crm_debug("%s %s may still be in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->real_action, time_sum, timeout_left, delay); } else if (cmd->result.execution_status == PCMK_EXEC_PENDING) { crm_info("%s %s is still in progress: re-scheduling (elapsed=%dms, remaining=%dms, start_delay=%dms)", cmd->rsc_id, cmd->action, time_sum, timeout_left, delay); } else { crm_notice("%s %s failed: %s: Re-scheduling (remaining " "timeout %s) " QB_XS " exitstatus=%d elapsed=%dms start_delay=%dms)", cmd->rsc_id, cmd->action, crm_exit_str(cmd->result.exit_status), pcmk__readable_interval(timeout_left), cmd->result.exit_status, time_sum, delay); } cmd_reset(cmd); if (rsc) { rsc->active = NULL; } schedule_lrmd_cmd(rsc, cmd); /* Don't finalize cmd, we're not done with it yet */ return; } else { crm_notice("Giving up on %s %s (rc=%d): timeout (elapsed=%dms, remaining=%dms)", cmd->rsc_id, (cmd->real_action? cmd->real_action : cmd->action), cmd->result.exit_status, time_sum, timeout_left); pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT, "Investigate reason for timeout, and adjust " "configured operation timeout if necessary"); cmd_original_times(cmd); } #endif finalize: pcmk__set_result_output(&(cmd->result), services__grab_stdout(action), services__grab_stderr(action)); cmd_finalize(cmd, rsc); } /*! * \internal * \brief Process the result of a fence device action (start, stop, or monitor) * * \param[in,out] cmd Fence device action that completed * \param[in] exit_status Fencer API exit status for action * \param[in] execution_status Fencer API execution status for action * \param[in] exit_reason Human-friendly detail, if action failed */ static void stonith_action_complete(lrmd_cmd_t *cmd, int exit_status, enum pcmk_exec_status execution_status, const char *exit_reason) { // This can be NULL if resource was removed before command completed lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, cmd->rsc_id); // Simplify fencer exit status to uniform exit status if (exit_status != CRM_EX_OK) { exit_status = PCMK_OCF_UNKNOWN_ERROR; } if (cmd->result.execution_status == PCMK_EXEC_CANCELLED) { /* An in-flight fence action was cancelled. The execution status is * already correct, so don't overwrite it. */ execution_status = PCMK_EXEC_CANCELLED; } else { /* Some execution status codes have specific meanings for the fencer * that executor clients may not expect, so map them to a simple error * status. 
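 * In summary: PCMK_EXEC_NOT_CONNECTED and PCMK_EXEC_INVALID become plain
 * PCMK_EXEC_ERROR; PCMK_EXEC_NO_FENCE_DEVICE also becomes PCMK_EXEC_ERROR,
 * but the exit status is adjusted first (monitors report "not running",
 * stops report success, and other actions report "not installed");
 * PCMK_EXEC_NOT_SUPPORTED keeps its execution status but reports
 * PCMK_OCF_UNIMPLEMENT_FEATURE as the exit status.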
*/ switch (execution_status) { case PCMK_EXEC_NOT_CONNECTED: case PCMK_EXEC_INVALID: execution_status = PCMK_EXEC_ERROR; break; case PCMK_EXEC_NO_FENCE_DEVICE: /* This should be possible only for probes in practice, but * interpret for all actions to be safe. */ if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_none)) { exit_status = PCMK_OCF_NOT_RUNNING; } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) { exit_status = PCMK_OCF_OK; } else { exit_status = PCMK_OCF_NOT_INSTALLED; } execution_status = PCMK_EXEC_ERROR; break; case PCMK_EXEC_NOT_SUPPORTED: exit_status = PCMK_OCF_UNIMPLEMENT_FEATURE; break; default: break; } } pcmk__set_result(&cmd->result, exit_status, execution_status, exit_reason); // Certain successful actions change the known state of the resource if ((rsc != NULL) && pcmk__result_ok(&(cmd->result))) { if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) { pcmk__set_result(&rsc->fence_probe_result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); // "running" } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); // "not running" } } /* The recurring timer should not be running at this point in any case, but * as a failsafe, stop it if it is. */ stop_recurring_timer(cmd); /* Reschedule this command if appropriate. If a recurring command is *not* * rescheduled, its status must be PCMK_EXEC_CANCELLED, otherwise it will * not be removed from recurring_ops by cmd_finalize(). */ if (rsc && (cmd->interval_ms > 0) && (cmd->result.execution_status != PCMK_EXEC_CANCELLED)) { start_recurring_timer(cmd); } cmd_finalize(cmd, rsc); } static void lrmd_stonith_callback(stonith_t * stonith, stonith_callback_data_t * data) { if ((data == NULL) || (data->userdata == NULL)) { crm_err("Ignoring fence action result: " "Invalid callback arguments (bug?)"); } else { stonith_action_complete((lrmd_cmd_t *) data->userdata, stonith__exit_status(data), stonith__execution_status(data), stonith__exit_reason(data)); } } void stonith_connection_failed(void) { GHashTableIter iter; lrmd_rsc_t *rsc = NULL; crm_warn("Connection to fencer lost (any pending operations for " "fence devices will be considered failed)"); g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &rsc)) { if (!pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_none)) { continue; } /* If we registered this fence device, we don't know whether the * fencer still has the registration or not. Cause future probes to * return an error until the resource is stopped or started * successfully. This is especially important if the controller also * went away (possibly due to a cluster layer restart) and won't * receive our client notification of any monitors finalized below. */ if (rsc->fence_probe_result.execution_status == PCMK_EXEC_DONE) { pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR, PCMK_EXEC_NOT_CONNECTED, "Lost connection to fencer"); } // Consider any active, pending, or recurring operations as failed for (GList *op = rsc->recurring_ops; op != NULL; op = op->next) { lrmd_cmd_t *cmd = op->data; /* This won't free a recurring op but instead restart its timer. * If cmd is rsc->active, this will set rsc->active to NULL, so we * don't have to worry about finalizing it a second time below. 
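 * (That is because stonith_action_complete() restarts the timer of any
 * recurring command whose execution status is not PCMK_EXEC_CANCELLED, and
 * cmd_finalize() then only resets such a command instead of freeing it.)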
*/ stonith_action_complete(cmd, CRM_EX_ERROR, PCMK_EXEC_NOT_CONNECTED, "Lost connection to fencer"); } if (rsc->active != NULL) { rsc->pending_ops = g_list_prepend(rsc->pending_ops, rsc->active); } while (rsc->pending_ops != NULL) { // This will free the op and remove it from rsc->pending_ops stonith_action_complete((lrmd_cmd_t *) rsc->pending_ops->data, CRM_EX_ERROR, PCMK_EXEC_NOT_CONNECTED, "Lost connection to fencer"); } } } /*! * \internal * \brief Execute a stonith resource "start" action * * Start a stonith resource by registering it with the fencer. * (Stonith agents don't have a start command.) * * \param[in,out] stonith_api Connection to fencer * \param[in] rsc Stonith resource to start * \param[in] cmd Start command to execute * * \return pcmk_ok on success, -errno otherwise */ static int execd_stonith_start(stonith_t *stonith_api, const lrmd_rsc_t *rsc, const lrmd_cmd_t *cmd) { char *key = NULL; char *value = NULL; stonith_key_value_t *device_params = NULL; int rc = pcmk_ok; // Convert command parameters to stonith API key/values if (cmd->params) { GHashTableIter iter; g_hash_table_iter_init(&iter, cmd->params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { device_params = stonith_key_value_add(device_params, key, value); } } /* The fencer will automatically register devices via CIB notifications * when the CIB changes, but to avoid a possible race condition between * the fencer receiving the notification and the executor requesting that * resource, the executor registers the device as well. The fencer knows how * to handle duplicate registrations. */ rc = stonith_api->cmds->register_device(stonith_api, st_opt_sync_call, cmd->rsc_id, rsc->provider, rsc->type, device_params); stonith_key_value_freeall(device_params, 1, 1); return rc; } /*! * \internal * \brief Execute a stonith resource "stop" action * * Stop a stonith resource by unregistering it with the fencer. * (Stonith agents don't have a stop command.) * * \param[in,out] stonith_api Connection to fencer * \param[in] rsc Stonith resource to stop * * \return pcmk_ok on success, -errno otherwise */ static inline int execd_stonith_stop(stonith_t *stonith_api, const lrmd_rsc_t *rsc) { /* @TODO Failure would indicate a problem communicating with fencer; * perhaps we should try reconnecting and retrying a few times? */ return stonith_api->cmds->remove_device(stonith_api, st_opt_sync_call, rsc->rsc_id); } /*! 
* \internal * \brief Initiate a stonith resource agent recurring "monitor" action * * \param[in,out] stonith_api Connection to fencer * \param[in,out] rsc Stonith resource to monitor * \param[in] cmd Monitor command being executed * * \return pcmk_ok if monitor was successfully initiated, -errno otherwise */ static inline int execd_stonith_monitor(stonith_t *stonith_api, lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) { int rc = stonith_api->cmds->monitor(stonith_api, 0, cmd->rsc_id, pcmk__timeout_ms2s(cmd->timeout)); rc = stonith_api->cmds->register_callback(stonith_api, rc, 0, 0, cmd, "lrmd_stonith_callback", lrmd_stonith_callback); if (rc == TRUE) { rsc->active = cmd; rc = pcmk_ok; } else { rc = -pcmk_err_generic; } return rc; } static void execute_stonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) { int rc = 0; bool do_monitor = FALSE; stonith_t *stonith_api = get_stonith_connection(); if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei) && (cmd->interval_ms == 0)) { // Probes don't require a fencer connection stonith_action_complete(cmd, rsc->fence_probe_result.exit_status, rsc->fence_probe_result.execution_status, rsc->fence_probe_result.exit_reason); return; } else if (stonith_api == NULL) { stonith_action_complete(cmd, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_NOT_CONNECTED, "No connection to fencer"); return; } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) { rc = execd_stonith_start(stonith_api, rsc, cmd); if (rc == pcmk_ok) { do_monitor = TRUE; } } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) { rc = execd_stonith_stop(stonith_api, rsc); } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) { do_monitor = TRUE; } else { stonith_action_complete(cmd, PCMK_OCF_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR, "Invalid fence device action (bug?)"); return; } if (do_monitor) { rc = execd_stonith_monitor(stonith_api, rsc, cmd); if (rc == pcmk_ok) { // Don't clean up yet, we will find out result of the monitor later return; } } stonith_action_complete(cmd, ((rc == pcmk_ok)? CRM_EX_OK : CRM_EX_ERROR), stonith__legacy2status(rc), ((rc == -pcmk_err_generic)? NULL : pcmk_strerror(rc))); } static void execute_nonstonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd) { svc_action_t *action = NULL; GHashTable *params_copy = NULL; pcmk__assert((rsc != NULL) && (cmd != NULL)); crm_trace("Creating action, resource:%s action:%s class:%s provider:%s agent:%s", rsc->rsc_id, cmd->action, rsc->class, rsc->provider, rsc->type); params_copy = pcmk__str_table_dup(cmd->params); action = services__create_resource_action(rsc->rsc_id, rsc->class, rsc->provider, rsc->type, normalize_action_name(rsc, cmd->action), cmd->interval_ms, cmd->timeout, params_copy, cmd->service_flags); if (action == NULL) { pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, strerror(ENOMEM)); cmd_finalize(cmd, rsc); return; } if (action->rc != PCMK_OCF_UNKNOWN) { services__copy_result(action, &(cmd->result)); services_action_free(action); cmd_finalize(cmd, rsc); return; } action->cb_data = cmd; if (services_action_async(action, action_complete)) { /* The services library has taken responsibility for the action. It * could be pending, blocked, or merged into a duplicate recurring * action, in which case the action callback (action_complete()) * will be called when the action completes, otherwise the callback has * already been called. * * action_complete() calls cmd_finalize() which can free cmd, so cmd * cannot be used here. 
*/ } else { /* This is a recurring action that is not being cancelled and could not * be initiated. It has been rescheduled, and the action callback * (action_complete()) has been called, which in this case has already * called cmd_finalize(), which in this case should only reset (not * free) cmd. */ services__copy_result(action, &(cmd->result)); services_action_free(action); } } static gboolean execute_resource_action(gpointer user_data) { lrmd_rsc_t *rsc = (lrmd_rsc_t *) user_data; lrmd_cmd_t *cmd = NULL; CRM_CHECK(rsc != NULL, return FALSE); if (rsc->active) { crm_trace("%s is still active", rsc->rsc_id); return TRUE; } if (rsc->pending_ops) { GList *first = rsc->pending_ops; cmd = first->data; if (cmd->delay_id) { crm_trace ("Command %s %s was asked to run too early, waiting for start_delay timeout of %dms", cmd->rsc_id, cmd->action, cmd->start_delay); return TRUE; } rsc->pending_ops = g_list_remove_link(rsc->pending_ops, first); g_list_free_1(first); #ifdef PCMK__TIME_USE_CGT get_current_time(&(cmd->t_run), &(cmd->t_first_run)); #endif cmd->epoch_last_run = time(NULL); } if (!cmd) { crm_trace("Nothing further to do for %s", rsc->rsc_id); return TRUE; } rsc->active = cmd; /* only one op at a time for a rsc */ if (cmd->interval_ms) { rsc->recurring_ops = g_list_append(rsc->recurring_ops, cmd); } log_execute(cmd); if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { execute_stonith_action(rsc, cmd); } else { execute_nonstonith_action(rsc, cmd); } return TRUE; } void free_rsc(gpointer data) { GList *gIter = NULL; lrmd_rsc_t *rsc = data; int is_stonith = pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei); gIter = rsc->pending_ops; while (gIter != NULL) { GList *next = gIter->next; lrmd_cmd_t *cmd = gIter->data; /* command was never executed */ cmd->result.execution_status = PCMK_EXEC_CANCELLED; cmd_finalize(cmd, NULL); gIter = next; } /* frees list, but not list elements. */ g_list_free(rsc->pending_ops); gIter = rsc->recurring_ops; while (gIter != NULL) { GList *next = gIter->next; lrmd_cmd_t *cmd = gIter->data; if (is_stonith) { cmd->result.execution_status = PCMK_EXEC_CANCELLED; /* If a stonith command is in-flight, just mark it as cancelled; * it is not safe to finalize/free the cmd until the stonith api * says it has either completed or timed out. */ if (rsc->active != cmd) { cmd_finalize(cmd, NULL); } } else { /* This command is already handed off to service library, * let service library cancel it and tell us via the callback * when it is cancelled. The rsc can be safely destroyed * even if we are waiting for the cancel result */ services_action_cancel(rsc->rsc_id, normalize_action_name(rsc, cmd->action), cmd->interval_ms); } gIter = next; } /* frees list, but not list elements. 
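 * Any commands still linked here have either been finalized in the loop
 * above (stonith ops that were not in flight) or are left for the fencer or
 * services library callbacks to clean up, so only the list cells themselves
 * are released at this point.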
*/ g_list_free(rsc->recurring_ops); free(rsc->rsc_id); free(rsc->class); free(rsc->provider); free(rsc->type); mainloop_destroy_trigger(rsc->work); free(rsc); } static int process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id, xmlNode **reply) { int rc = pcmk_ok; time_t now = time(NULL); const char *protocol_version = pcmk__xe_get(request, PCMK__XA_LRMD_PROTOCOL_VERSION); const char *start_state = pcmk__env_option(PCMK__ENV_NODE_START_STATE); if (compare_version(protocol_version, LRMD_COMPATIBLE_PROTOCOL) < 0) { crm_err("Cluster API version must be greater than or equal to %s, not %s", LRMD_COMPATIBLE_PROTOCOL, protocol_version); rc = -EPROTO; } if (pcmk__xe_attr_is_true(request, PCMK__XA_LRMD_IS_IPC_PROVIDER)) { #ifdef PCMK__COMPILE_REMOTE if ((client->remote != NULL) && pcmk_is_set(client->flags, pcmk__client_tls_handshake_complete)) { const char *op = pcmk__xe_get(request, PCMK__XA_LRMD_OP); // This is a remote connection from a cluster node's controller ipc_proxy_add_provider(client); /* @TODO Allowing multiple proxies makes no sense given that clients * have no way to choose between them. Maybe always use the most * recent one and switch any existing IPC connections to use it, * by iterating over ipc_clients here, and if client->id doesn't * match the client's userdata, replace the userdata with the new * ID. After the iteration, call lrmd_remote_client_destroy() on any * of the replaced values in ipc_providers. */ /* If this was a register operation, also ask for new schema files but * only if it's supported by the protocol version. */ if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none) && LRMD_SUPPORTS_SCHEMA_XFER(protocol_version)) { remoted_request_cib_schema_files(); } } else { rc = -EACCES; } #else rc = -EPROTONOSUPPORT; #endif } *reply = create_lrmd_reply(__func__, rc, call_id); crm_xml_add(*reply, PCMK__XA_LRMD_OP, CRM_OP_REGISTER); crm_xml_add(*reply, PCMK__XA_LRMD_CLIENTID, client->id); crm_xml_add(*reply, PCMK__XA_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); crm_xml_add_ll(*reply, PCMK__XA_UPTIME, now - start_time); if (start_state) { crm_xml_add(*reply, PCMK__XA_NODE_START_STATE, start_state); } return rc; } static int process_lrmd_rsc_register(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = build_rsc_from_xml(request); lrmd_rsc_t *dup = g_hash_table_lookup(rsc_list, rsc->rsc_id); if (dup && pcmk__str_eq(rsc->class, dup->class, pcmk__str_casei) && pcmk__str_eq(rsc->provider, dup->provider, pcmk__str_casei) && pcmk__str_eq(rsc->type, dup->type, pcmk__str_casei)) { crm_notice("Ignoring duplicate registration of '%s'", rsc->rsc_id); free_rsc(rsc); return rc; } g_hash_table_replace(rsc_list, rsc->rsc_id, rsc); crm_info("Cached agent information for '%s'", rsc->rsc_id); return rc; } static xmlNode * process_lrmd_get_rsc_info(xmlNode *request, int call_id) { int rc = pcmk_ok; xmlNode *rsc_xml = pcmk__xpath_find_one(request->doc, "//" PCMK__XE_LRMD_RSC, LOG_ERR); const char *rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); xmlNode *reply = NULL; lrmd_rsc_t *rsc = NULL; if (rsc_id == NULL) { rc = -ENODEV; } else { rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Agent information for '%s' not in cache", rsc_id); rc = -ENODEV; } } reply = create_lrmd_reply(__func__, rc, call_id); if (rsc) { crm_xml_add(reply, PCMK__XA_LRMD_RSC_ID, rsc->rsc_id); crm_xml_add(reply, PCMK__XA_LRMD_CLASS, rsc->class); crm_xml_add(reply, PCMK__XA_LRMD_PROVIDER, rsc->provider); crm_xml_add(reply, 
PCMK__XA_LRMD_TYPE, rsc->type); } return reply; } static int process_lrmd_rsc_unregister(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; lrmd_rsc_t *rsc = NULL; xmlNode *rsc_xml = pcmk__xpath_find_one(request->doc, "//" PCMK__XE_LRMD_RSC, LOG_ERR); const char *rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); if (!rsc_id) { return -ENODEV; } rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Ignoring unregistration of resource '%s', which is not registered", rsc_id); return pcmk_ok; } if (rsc->active) { /* let the caller know there are still active ops on this rsc to watch for */ crm_trace("Operation (%p) still in progress for unregistered resource %s", rsc->active, rsc_id); rc = -EINPROGRESS; } g_hash_table_remove(rsc_list, rsc_id); return rc; } static int process_lrmd_rsc_exec(pcmk__client_t *client, uint32_t id, xmlNode *request) { lrmd_rsc_t *rsc = NULL; lrmd_cmd_t *cmd = NULL; xmlNode *rsc_xml = pcmk__xpath_find_one(request->doc, "//" PCMK__XE_LRMD_RSC, LOG_ERR); const char *rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); int call_id; if (!rsc_id) { return -EINVAL; } if (!(rsc = g_hash_table_lookup(rsc_list, rsc_id))) { crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); return -ENODEV; } cmd = create_lrmd_cmd(request, client); call_id = cmd->call_id; /* Don't reference cmd after handing it off to be scheduled. * The cmd could get merged and freed. */ schedule_lrmd_cmd(rsc, cmd); return call_id; } static int cancel_op(const char *rsc_id, const char *action, guint interval_ms) { GList *gIter = NULL; lrmd_rsc_t *rsc = g_hash_table_lookup(rsc_list, rsc_id); /* How to cancel an action. * 1. Check pending ops list, if it hasn't been handed off * to the service library or stonith recurring list remove * it there and that will stop it. * 2. If it isn't in the pending ops list, then it's either a * recurring op in the stonith recurring list, or the service * library's recurring list. Stop it there * 3. If not found in any lists, then this operation has either * been executed already and is not a recurring operation, or * never existed. */ if (!rsc) { return -ENODEV; } for (gIter = rsc->pending_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (action_matches(cmd, action, interval_ms)) { cmd->result.execution_status = PCMK_EXEC_CANCELLED; cmd_finalize(cmd, rsc); return pcmk_ok; } } if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { /* The service library does not handle stonith operations. * We have to handle recurring stonith operations ourselves. */ for (gIter = rsc->recurring_ops; gIter != NULL; gIter = gIter->next) { lrmd_cmd_t *cmd = gIter->data; if (action_matches(cmd, action, interval_ms)) { cmd->result.execution_status = PCMK_EXEC_CANCELLED; if (rsc->active != cmd) { cmd_finalize(cmd, rsc); } return pcmk_ok; } } } else if (services_action_cancel(rsc_id, normalize_action_name(rsc, action), interval_ms) == TRUE) { /* The service library will tell the action_complete callback function * this action was cancelled, which will destroy the cmd and remove * it from the recurring_op list. Do not do that in this function * if the service library says it cancelled it. */ return pcmk_ok; } return -EOPNOTSUPP; } static void cancel_all_recurring(lrmd_rsc_t * rsc, const char *client_id) { GList *cmd_list = NULL; GList *cmd_iter = NULL; /* Notice a copy of each list is created when concat is called. 
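 * (Strictly speaking, the copies come from the g_list_copy() calls below;
 * g_list_concat() only links those copies into cmd_list.)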
* This prevents odd behavior from occurring when the cmd_list * is iterated through later on. It is possible the cancel_op * function may end up modifying the recurring_ops and pending_ops * lists. If we did not copy those lists, our cmd_list iteration * could get messed up.*/ if (rsc->recurring_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->recurring_ops)); } if (rsc->pending_ops) { cmd_list = g_list_concat(cmd_list, g_list_copy(rsc->pending_ops)); } if (!cmd_list) { return; } for (cmd_iter = cmd_list; cmd_iter; cmd_iter = cmd_iter->next) { lrmd_cmd_t *cmd = cmd_iter->data; if (cmd->interval_ms == 0) { continue; } if (client_id && !pcmk__str_eq(cmd->client_id, client_id, pcmk__str_casei)) { continue; } cancel_op(rsc->rsc_id, cmd->action, cmd->interval_ms); } /* frees only the copied list data, not the cmds */ g_list_free(cmd_list); } static int process_lrmd_rsc_cancel(pcmk__client_t *client, uint32_t id, xmlNode *request) { xmlNode *rsc_xml = pcmk__xpath_find_one(request->doc, "//" PCMK__XE_LRMD_RSC, LOG_ERR); const char *rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); const char *action = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ACTION); guint interval_ms = 0; pcmk__xe_get_guint(rsc_xml, PCMK__XA_LRMD_RSC_INTERVAL, &interval_ms); if (!rsc_id || !action) { return -EINVAL; } return cancel_op(rsc_id, action, interval_ms); } static void add_recurring_op_xml(xmlNode *reply, lrmd_rsc_t *rsc) { xmlNode *rsc_xml = pcmk__xe_create(reply, PCMK__XE_LRMD_RSC); crm_xml_add(rsc_xml, PCMK__XA_LRMD_RSC_ID, rsc->rsc_id); for (GList *item = rsc->recurring_ops; item != NULL; item = item->next) { lrmd_cmd_t *cmd = item->data; xmlNode *op_xml = pcmk__xe_create(rsc_xml, PCMK__XE_LRMD_RSC_OP); crm_xml_add(op_xml, PCMK__XA_LRMD_RSC_ACTION, pcmk__s(cmd->real_action, cmd->action)); crm_xml_add_ms(op_xml, PCMK__XA_LRMD_RSC_INTERVAL, cmd->interval_ms); crm_xml_add_int(op_xml, PCMK__XA_LRMD_TIMEOUT, cmd->timeout_orig); } } static xmlNode * process_lrmd_get_recurring(xmlNode *request, int call_id) { int rc = pcmk_ok; const char *rsc_id = NULL; lrmd_rsc_t *rsc = NULL; xmlNode *reply = NULL; xmlNode *rsc_xml = NULL; // Resource ID is optional rsc_xml = pcmk__xe_first_child(request, PCMK__XE_LRMD_CALLDATA, NULL, NULL); if (rsc_xml) { rsc_xml = pcmk__xe_first_child(rsc_xml, PCMK__XE_LRMD_RSC, NULL, NULL); } if (rsc_xml) { rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); } // If resource ID is specified, resource must exist if (rsc_id != NULL) { rsc = g_hash_table_lookup(rsc_list, rsc_id); if (rsc == NULL) { crm_info("Resource '%s' not found (%d active resources)", rsc_id, g_hash_table_size(rsc_list)); rc = -ENODEV; } } reply = create_lrmd_reply(__func__, rc, call_id); // If resource ID is not specified, check all resources if (rsc_id == NULL) { GHashTableIter iter; char *key = NULL; g_hash_table_iter_init(&iter, rsc_list); while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &rsc)) { add_recurring_op_xml(reply, rsc); } } else if (rsc) { add_recurring_op_xml(reply, rsc); } return reply; } void process_lrmd_message(pcmk__client_t *client, uint32_t id, xmlNode *request) { int rc = pcmk_ok; int call_id = 0; const char *op = pcmk__xe_get(request, PCMK__XA_LRMD_OP); int do_reply = 0; int do_notify = 0; xmlNode *reply = NULL; /* Certain IPC commands may be done only by privileged users (i.e. root or * hacluster), because they would otherwise provide a means of bypassing * ACLs. 
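 * Below, only the sign-on request (CRM_OP_REGISTER) and LRMD_OP_POKE are
 * handled without checking whether the client is privileged; every other
 * recognized operation from an unprivileged client is rejected with
 * -EACCES.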
*/ bool allowed = pcmk_is_set(client->flags, pcmk__client_privileged); crm_trace("Processing %s operation from %s", op, client->id); pcmk__xe_get_int(request, PCMK__XA_LRMD_CALLID, &call_id); if (pcmk__str_eq(op, CRM_OP_IPC_FWD, pcmk__str_none)) { #ifdef PCMK__COMPILE_REMOTE if (allowed) { ipc_proxy_forward_client(client, request); } else { rc = -EACCES; } #else rc = -EPROTONOSUPPORT; #endif do_reply = 1; } else if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) { rc = process_lrmd_signon(client, request, call_id, &reply); do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_REG, pcmk__str_none)) { if (allowed) { rc = process_lrmd_rsc_register(client, id, request); do_notify = 1; } else { rc = -EACCES; } do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_INFO, pcmk__str_none)) { if (allowed) { reply = process_lrmd_get_rsc_info(request, call_id); } else { rc = -EACCES; } do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_UNREG, pcmk__str_none)) { if (allowed) { rc = process_lrmd_rsc_unregister(client, id, request); /* don't notify anyone about failed un-registers */ if (rc == pcmk_ok || rc == -EINPROGRESS) { do_notify = 1; } } else { rc = -EACCES; } do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_EXEC, pcmk__str_none)) { if (allowed) { rc = process_lrmd_rsc_exec(client, id, request); } else { rc = -EACCES; } do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_RSC_CANCEL, pcmk__str_none)) { if (allowed) { rc = process_lrmd_rsc_cancel(client, id, request); } else { rc = -EACCES; } do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_POKE, pcmk__str_none)) { do_notify = 1; do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_CHECK, pcmk__str_none)) { if (allowed) { xmlNode *wrapper = pcmk__xe_first_child(request, PCMK__XE_LRMD_CALLDATA, NULL, NULL); xmlNode *data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); const char *timeout = NULL; CRM_LOG_ASSERT(data != NULL); timeout = pcmk__xe_get(data, PCMK__XA_LRMD_WATCHDOG); pcmk__valid_stonith_watchdog_timeout(timeout); } else { rc = -EACCES; } } else if (pcmk__str_eq(op, LRMD_OP_ALERT_EXEC, pcmk__str_none)) { if (allowed) { rc = process_lrmd_alert_exec(client, id, request); } else { rc = -EACCES; } do_reply = 1; } else if (pcmk__str_eq(op, LRMD_OP_GET_RECURRING, pcmk__str_none)) { if (allowed) { reply = process_lrmd_get_recurring(request, call_id); } else { rc = -EACCES; } do_reply = 1; } else { rc = -EOPNOTSUPP; do_reply = 1; crm_err("Unknown IPC request '%s' from client %s", op, pcmk__client_name(client)); } if (rc == -EACCES) { crm_warn("Rejecting IPC request '%s' from unprivileged client %s", op, pcmk__client_name(client)); } crm_debug("Processed %s operation from %s: rc=%d, reply=%d, notify=%d", op, client->id, rc, do_reply, do_notify); if (do_reply) { int send_rc = pcmk_rc_ok; if (reply == NULL) { reply = create_lrmd_reply(__func__, rc, call_id); } send_rc = lrmd_server_send_reply(client, id, reply); pcmk__xml_free(reply); if (send_rc != pcmk_rc_ok) { crm_warn("Reply to client %s failed: %s " QB_XS " rc=%d", pcmk__client_name(client), pcmk_rc_str(send_rc), send_rc); } } if (do_notify) { send_generic_notify(rc, request); } } diff --git a/daemons/execd/remoted_tls.c b/daemons/execd/remoted_tls.c index 625392887e..c7c04d283b 100644 --- a/daemons/execd/remoted_tls.c +++ b/daemons/execd/remoted_tls.c @@ -1,440 +1,439 @@ /* * Copyright 2012-2025 the Pacemaker project contributors * * The version control history for this file may have further details. 
* * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pacemaker-execd.h" #include #define LRMD_REMOTE_AUTH_TIMEOUT 10000 static pcmk__tls_t *tls = NULL; static int ssock = -1; extern int lrmd_call_id; /*! * \internal * \brief Read (more) TLS handshake data from client * * \param[in,out] client IPC client doing handshake * * \return 0 on success or more data needed, -1 on error */ static int remoted__read_handshake_data(pcmk__client_t *client) { int rc = pcmk__read_handshake_data(client); if (rc == EAGAIN) { /* No more data is available at the moment. Just return for now; * we'll get invoked again once the client sends more. */ return 0; } else if (rc != pcmk_rc_ok) { return -1; } if (client->remote->auth_timeout) { g_source_remove(client->remote->auth_timeout); } client->remote->auth_timeout = 0; pcmk__set_client_flags(client, pcmk__client_tls_handshake_complete); crm_notice("Remote client connection accepted"); /* Now that the handshake is done, see if any client TLS certificate is * close to its expiration date and log if so. If a TLS certificate is not * in use, this function will just return so we don't need to check for the * session type here. */ pcmk__tls_check_cert_expiration(client->remote->tls_session); /* Only a client with access to the TLS key can connect, so we can treat * it as privileged. */ pcmk__set_client_flags(client, pcmk__client_privileged); // Alert other clients of the new connection notify_of_new_client(client); return 0; } static int lrmd_remote_client_msg(gpointer data) { int id = 0; int rc = pcmk_rc_ok; xmlNode *request = NULL; pcmk__client_t *client = data; if (!pcmk_is_set(client->flags, pcmk__client_tls_handshake_complete)) { return remoted__read_handshake_data(client); } rc = pcmk__remote_ready(client->remote, 0); switch (rc) { case pcmk_rc_ok: break; case ETIME: /* No message available to read */ return 0; default: /* Error */ crm_info("Error polling remote client: %s", pcmk_rc_str(rc)); return -1; } rc = pcmk__read_available_remote_data(client->remote); switch (rc) { case pcmk_rc_ok: break; case EAGAIN: /* We haven't read the whole message yet */ return 0; default: /* Error */ crm_info("Error reading from remote client: %s", pcmk_rc_str(rc)); return -1; } request = pcmk__remote_message_xml(client->remote); if (request == NULL) { return 0; } pcmk__xe_get_int(request, PCMK__XA_LRMD_REMOTE_MSG_ID, &id); crm_trace("Processing remote client request %d", id); if (!client->name) { - client->name = crm_element_value_copy(request, - PCMK__XA_LRMD_CLIENTNAME); + client->name = pcmk__xe_get_copy(request, PCMK__XA_LRMD_CLIENTNAME); } lrmd_call_id++; if (lrmd_call_id < 1) { lrmd_call_id = 1; } crm_xml_add(request, PCMK__XA_LRMD_CLIENTID, client->id); crm_xml_add(request, PCMK__XA_LRMD_CLIENTNAME, client->name); crm_xml_add_int(request, PCMK__XA_LRMD_CALLID, lrmd_call_id); process_lrmd_message(client, id, request); pcmk__xml_free(request); return 0; } static void lrmd_remote_client_destroy(gpointer user_data) { pcmk__client_t *client = user_data; if (client == NULL) { return; } crm_notice("Cleaning up after remote client %s disconnected", pcmk__client_name(client)); ipc_proxy_remove_provider(client); /* if this is the last remote connection, stop recurring * operations */ if (pcmk__ipc_client_count() == 1) { client_disconnect_cleanup(NULL); } if 
(client->remote->tls_session) { int csock = pcmk__tls_get_client_sock(client->remote); gnutls_bye(client->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(client->remote->tls_session); client->remote->tls_session = NULL; close(csock); } lrmd_client_destroy(client); return; } static gboolean lrmd_auth_timeout_cb(gpointer data) { pcmk__client_t *client = data; client->remote->auth_timeout = 0; if (pcmk_is_set(client->flags, pcmk__client_tls_handshake_complete)) { return FALSE; } mainloop_del_fd(client->remote->source); client->remote->source = NULL; crm_err("Remote client authentication timed out"); return FALSE; } // Dispatch callback for remote server socket static int lrmd_remote_listen(gpointer data) { int csock = -1; gnutls_session_t session = NULL; pcmk__client_t *new_client = NULL; // For client socket static struct mainloop_fd_callbacks lrmd_remote_fd_cb = { .dispatch = lrmd_remote_client_msg, .destroy = lrmd_remote_client_destroy, }; CRM_CHECK(ssock >= 0, return TRUE); if (pcmk__accept_remote_connection(ssock, &csock) != pcmk_rc_ok) { return TRUE; } session = pcmk__new_tls_session(tls, csock); if (session == NULL) { close(csock); return TRUE; } new_client = pcmk__new_unauth_client(NULL); new_client->remote = pcmk__assert_alloc(1, sizeof(pcmk__remote_t)); pcmk__set_client_flags(new_client, pcmk__client_tls); new_client->remote->tls_session = session; // Require the client to authenticate within this time new_client->remote->auth_timeout = pcmk__create_timer(LRMD_REMOTE_AUTH_TIMEOUT, lrmd_auth_timeout_cb, new_client); crm_info("Remote client pending authentication " QB_XS " %p id: %s", new_client, new_client->id); new_client->remote->source = mainloop_add_fd("pacemaker-remote-client", G_PRIORITY_DEFAULT, csock, new_client, &lrmd_remote_fd_cb); return TRUE; } static void tls_server_dropped(gpointer user_data) { crm_notice("TLS server session ended"); return; } // \return 0 on success, -1 on error (gnutls_psk_server_credentials_function) static int lrmd_tls_server_key_cb(gnutls_session_t session, const char *username, gnutls_datum_t * key) { return (lrmd__init_remote_key(key) == pcmk_rc_ok)? 
0 : -1; } static int bind_and_listen(struct addrinfo *addr) { int optval; int fd; int rc; char buffer[INET6_ADDRSTRLEN] = { 0, }; pcmk__sockaddr2str(addr->ai_addr, buffer); crm_trace("Attempting to bind to address %s", buffer); fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol); if (fd < 0) { rc = errno; crm_err("Listener socket creation failed: %", pcmk_rc_str(rc)); return -rc; } /* reuse address */ optval = 1; rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (rc < 0) { rc = errno; crm_err("Local address reuse not allowed on %s: %s", buffer, pcmk_rc_str(rc)); close(fd); return -rc; } if (addr->ai_family == AF_INET6) { optval = 0; rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &optval, sizeof(optval)); if (rc < 0) { rc = errno; crm_err("Couldn't disable IPV6-only on %s: %s", buffer, pcmk_rc_str(rc)); close(fd); return -rc; } } if (bind(fd, addr->ai_addr, addr->ai_addrlen) != 0) { rc = errno; crm_err("Cannot bind to %s: %s", buffer, pcmk_rc_str(rc)); close(fd); return -rc; } if (listen(fd, 10) == -1) { rc = errno; crm_err("Cannot listen on %s: %s", buffer, pcmk_rc_str(rc)); close(fd); return -rc; } return fd; } static int get_address_info(const char *bind_name, int port, struct addrinfo **res) { int rc; char port_str[6]; // at most "65535" struct addrinfo hints; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_flags = AI_PASSIVE; hints.ai_family = AF_UNSPEC; // IPv6 or IPv4 hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = IPPROTO_TCP; snprintf(port_str, sizeof(port_str), "%d", port); rc = getaddrinfo(bind_name, port_str, &hints, res); rc = pcmk__gaierror2rc(rc); if (rc != pcmk_rc_ok) { crm_err("Unable to get IP address(es) for %s: %s", (bind_name? bind_name : "local node"), pcmk_rc_str(rc)); return rc; } return pcmk_rc_ok; } int lrmd_init_remote_tls_server(void) { int rc = pcmk_rc_ok; int filter; int port = crm_default_remote_port(); struct addrinfo *res = NULL, *iter; const char *bind_name = pcmk__env_option(PCMK__ENV_REMOTE_ADDRESS); bool use_cert = pcmk__x509_enabled(); static struct mainloop_fd_callbacks remote_listen_fd_callbacks = { .dispatch = lrmd_remote_listen, .destroy = tls_server_dropped, }; CRM_CHECK(ssock == -1, return ssock); crm_debug("Starting TLS listener on %s port %d", (bind_name? bind_name : "all addresses on"), port); rc = pcmk__init_tls(&tls, true, use_cert ? GNUTLS_CRD_CERTIFICATE : GNUTLS_CRD_PSK); if (rc != pcmk_rc_ok) { return -1; } if (!use_cert) { gnutls_datum_t psk_key = { NULL, 0 }; pcmk__tls_add_psk_callback(tls, lrmd_tls_server_key_cb); /* The key callback won't get called until the first client connection * attempt. Do it once here, so we can warn the user at start-up if we can't * read the key. We don't error out, though, because it's fine if the key is * going to be added later. */ if (lrmd__init_remote_key(&psk_key) != pcmk_rc_ok) { crm_warn("A cluster connection will not be possible until the key is available"); } gnutls_free(psk_key.data); } if (get_address_info(bind_name, port, &res) != pcmk_rc_ok) { return -1; } /* Currently we listen on only one address from the resulting list (the * first IPv6 address we can bind to if possible, otherwise the first IPv4 * address we can bind to). When bind_name is NULL, this should be the * respective wildcard address. 
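 * The loop below implements this as two passes over the getaddrinfo()
 * results: it first tries to bind to each AF_INET6 entry, and only if none
 * of those succeeds does it start over with the AF_INET entries.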
* * @TODO If there is demand for specifying more than one address, allow * bind_name to be a space-separated list, call getaddrinfo() for each, * and create a socket for each result (set IPV6_V6ONLY on IPv6 sockets * since IPv4 listeners will have their own sockets). */ iter = res; filter = AF_INET6; while (iter) { if (iter->ai_family == filter) { ssock = bind_and_listen(iter); } if (ssock >= 0) { break; } iter = iter->ai_next; if (iter == NULL && filter == AF_INET6) { iter = res; filter = AF_INET; } } if (ssock >= 0) { mainloop_add_fd("pacemaker-remote-server", G_PRIORITY_DEFAULT, ssock, NULL, &remote_listen_fd_callbacks); crm_debug("Started TLS listener on %s port %d", (bind_name? bind_name : "all addresses on"), port); } freeaddrinfo(res); return ssock; } void execd_stop_tls_server(void) { if (tls != NULL) { pcmk__free_tls(tls); tls = NULL; } if (ssock >= 0) { close(ssock); ssock = -1; } } diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c index 12b205e302..f2f519e868 100644 --- a/daemons/fenced/fenced_commands.c +++ b/daemons/fenced/fenced_commands.c @@ -1,3667 +1,3665 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include // xmlNode #include // xmlXPathObject, etc. #include #include #include #include #include #include #include #include #include GHashTable *device_list = NULL; GHashTable *topology = NULL; static GList *cmd_list = NULL; static GHashTable *fenced_handlers = NULL; struct device_search_s { /* target of fence action */ char *host; /* requested fence action */ char *action; /* timeout to use if a device is queried dynamically for possible targets */ // @TODO This name is misleading now, it's the value of stonith-timeout int per_device_timeout; /* number of registered fencing devices at time of request */ int replies_needed; /* number of device replies received so far */ int replies_received; /* whether the target is eligible to perform requested action (or off) */ bool allow_self; /* private data to pass to search callback function */ void *user_data; /* function to call when all replies have been received */ void (*callback) (GList * devices, void *user_data); /* devices capable of performing requested action (or off if remapping) */ GList *capable; /* Whether to perform searches that support the action */ uint32_t support_action_only; }; static gboolean stonith_device_dispatch(gpointer user_data); static void st_child_done(int pid, const pcmk__action_result_t *result, void *user_data); static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence); static int get_agent_metadata(const char *agent, xmlNode **metadata); static void read_action_metadata(stonith_device_t *device); static enum fenced_target_by unpack_level_kind(const xmlNode *level); typedef struct async_command_s { int id; int pid; int fd_stdout; uint32_t options; int default_timeout; /* seconds */ int timeout; /* seconds */ int start_delay; // seconds (-1 means disable static/random fencing delays) int delay_id; char *op; char *origin; char *client; char *client_name; char *remote_op_id; char *target; uint32_t target_nodeid; char *action; char *device; GList *device_list; GList *next_device_iter; // device_list 
entry for next device to execute void *internal_user_data; void (*done_cb) (int pid, const pcmk__action_result_t *result, void *user_data); guint timer_sigterm; guint timer_sigkill; /*! If the operation timed out, this is the last signal * we sent to the process to get it to terminate */ int last_timeout_signo; stonith_device_t *active_on; stonith_device_t *activating_on; } async_command_t; static xmlNode *construct_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result); static gboolean is_action_required(const char *action, const stonith_device_t *device) { return (device != NULL) && device->automatic_unfencing && pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none); } static int get_action_delay_max(const stonith_device_t *device, const char *action) { const char *value = NULL; guint delay_max = 0U; if (!pcmk__is_fencing_action(action)) { return 0; } value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_MAX); if (value) { pcmk_parse_interval_spec(value, &delay_max); delay_max /= 1000; } return (int) delay_max; } static int get_action_delay_base(const stonith_device_t *device, const char *action, const char *target) { char *hash_value = NULL; guint delay_base = 0U; if (!pcmk__is_fencing_action(action)) { return 0; } hash_value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_BASE); if (hash_value) { char *value = pcmk__str_copy(hash_value); char *valptr = value; if (target != NULL) { for (char *val = strtok(value, "; \t"); val != NULL; val = strtok(NULL, "; \t")) { char *mapval = strchr(val, ':'); if (mapval == NULL || mapval[1] == 0) { crm_err("pcmk_delay_base: empty value in mapping", val); continue; } if (mapval != val && strncasecmp(target, val, (size_t)(mapval - val)) == 0) { value = mapval + 1; crm_debug("pcmk_delay_base mapped to %s for %s", value, target); break; } } } if (strchr(value, ':') == 0) { pcmk_parse_interval_spec(value, &delay_base); delay_base /= 1000; } free(valptr); } return (int) delay_base; } /*! * \internal * \brief Override STONITH timeout with pcmk_*_timeout if available * * \param[in] device STONITH device to use * \param[in] action STONITH action name * \param[in] default_timeout Timeout to use if device does not have * a pcmk_*_timeout parameter for action * * \return Value of pcmk_(action)_timeout if available, otherwise default_timeout * \note For consistency, it would be nice if reboot/off/on timeouts could be * set the same way as start/stop/monitor timeouts, i.e. with an * entry in the fencing resource configuration. However that * is insufficient because fencing devices may be registered directly via * the fencer's register_device() API instead of going through the CIB * (e.g. stonith_admin uses it for its -R option, and the executor uses it * to ensure a device is registered when a command is issued). As device * properties, pcmk_*_timeout parameters can be grabbed by the fencer when * the device is registered, whether by CIB change or API call. 
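 * As an illustration (hypothetical parameter value, not a default):
 *   pcmk_reboot_timeout=120s  makes the "pcmk_reboot_timeout" lookup below
 *                             override default_timeout for reboot requests;
 *   if the device lacks reboot support, the action is remapped to "off"
 *   first, so "pcmk_off_timeout" would be consulted instead.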
*/ static int get_action_timeout(const stonith_device_t *device, const char *action, int default_timeout) { if (action && device && device->params) { char buffer[64] = { 0, }; const char *value = NULL; /* If "reboot" was requested but the device does not support it, * we will remap to "off", so check timeout for "off" instead */ if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk_is_set(device->flags, st_device_supports_reboot)) { crm_trace("%s doesn't support reboot, using timeout for off instead", device->id); action = PCMK_ACTION_OFF; } /* If the device config specified an action-specific timeout, use it */ snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action); value = g_hash_table_lookup(device->params, buffer); if (value) { long long timeout_ms = crm_get_msec(value); return (int) QB_MIN(pcmk__timeout_ms2s(timeout_ms), INT_MAX); } } return default_timeout; } /*! * \internal * \brief Get the currently executing device for a fencing operation * * \param[in] cmd Fencing operation to check * * \return Currently executing device for \p cmd if any, otherwise NULL */ static stonith_device_t * cmd_device(const async_command_t *cmd) { if ((cmd == NULL) || (cmd->device == NULL) || (device_list == NULL)) { return NULL; } return g_hash_table_lookup(device_list, cmd->device); } /*! * \internal * \brief Return the configured reboot action for a given device * * \param[in] device_id Device ID * * \return Configured reboot action for \p device_id */ const char * fenced_device_reboot_action(const char *device_id) { const char *action = NULL; if ((device_list != NULL) && (device_id != NULL)) { stonith_device_t *device = g_hash_table_lookup(device_list, device_id); if ((device != NULL) && (device->params != NULL)) { action = g_hash_table_lookup(device->params, "pcmk_reboot_action"); } } return pcmk__s(action, PCMK_ACTION_REBOOT); } /*! * \internal * \brief Check whether a given device supports the "on" action * * \param[in] device_id Device ID * * \return true if \p device_id supports "on", otherwise false */ bool fenced_device_supports_on(const char *device_id) { if ((device_list != NULL) && (device_id != NULL)) { stonith_device_t *device = g_hash_table_lookup(device_list, device_id); if (device != NULL) { return pcmk_is_set(device->flags, st_device_supports_on); } } return false; } static void free_async_command(async_command_t * cmd) { if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } cmd_list = g_list_remove(cmd_list, cmd); g_list_free_full(cmd->device_list, free); free(cmd->device); free(cmd->action); free(cmd->target); free(cmd->remote_op_id); free(cmd->client); free(cmd->client_name); free(cmd->origin); free(cmd->op); free(cmd); } /*! * \internal * \brief Create a new asynchronous fencing operation from request XML * * \param[in] msg Fencing request XML (from IPC or CPG) * * \return Newly allocated fencing operation on success, otherwise NULL * * \note This asserts on memory errors, so a NULL return indicates an * unparseable message. 
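 * Specifically, NULL is returned when msg is NULL, when it lacks an element
 * with a PCMK__XA_ST_DEVICE_ACTION attribute, or when the device action,
 * operation (PCMK__XA_ST_OP), or client ID (PCMK__XA_ST_CLIENTID) value is
 * missing from the message.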
*/ static async_command_t * create_async_command(xmlNode *msg) { xmlNode *op = NULL; async_command_t *cmd = NULL; int rc = pcmk_rc_ok; if (msg == NULL) { return NULL; } op = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_ERR); if (op == NULL) { return NULL; } cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); // All messages must include these - cmd->action = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ACTION); - cmd->op = crm_element_value_copy(msg, PCMK__XA_ST_OP); - cmd->client = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTID); + cmd->action = pcmk__xe_get_copy(op, PCMK__XA_ST_DEVICE_ACTION); + cmd->op = pcmk__xe_get_copy(msg, PCMK__XA_ST_OP); + cmd->client = pcmk__xe_get_copy(msg, PCMK__XA_ST_CLIENTID); if ((cmd->action == NULL) || (cmd->op == NULL) || (cmd->client == NULL)) { free_async_command(cmd); return NULL; } pcmk__xe_get_int(msg, PCMK__XA_ST_CALLID, &(cmd->id)); pcmk__xe_get_int(msg, PCMK__XA_ST_DELAY, &(cmd->start_delay)); pcmk__xe_get_int(msg, PCMK__XA_ST_TIMEOUT, &(cmd->default_timeout)); cmd->timeout = cmd->default_timeout; rc = pcmk__xe_get_flags(msg, PCMK__XA_ST_CALLOPT, &(cmd->options), st_opt_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } - cmd->origin = crm_element_value_copy(msg, PCMK__XA_SRC); - cmd->remote_op_id = crm_element_value_copy(msg, PCMK__XA_ST_REMOTE_OP); - cmd->client_name = crm_element_value_copy(msg, PCMK__XA_ST_CLIENTNAME); - cmd->target = crm_element_value_copy(op, PCMK__XA_ST_TARGET); - cmd->device = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ID); + cmd->origin = pcmk__xe_get_copy(msg, PCMK__XA_SRC); + cmd->remote_op_id = pcmk__xe_get_copy(msg, PCMK__XA_ST_REMOTE_OP); + cmd->client_name = pcmk__xe_get_copy(msg, PCMK__XA_ST_CLIENTNAME); + cmd->target = pcmk__xe_get_copy(op, PCMK__XA_ST_TARGET); + cmd->device = pcmk__xe_get_copy(op, PCMK__XA_ST_DEVICE_ID); cmd->done_cb = st_child_done; // Track in global command list cmd_list = g_list_append(cmd_list, cmd); return cmd; } static int get_action_limit(stonith_device_t * device) { const char *value = NULL; int action_limit = 1; value = g_hash_table_lookup(device->params, PCMK_STONITH_ACTION_LIMIT); if ((value == NULL) || (pcmk__scan_min_int(value, &action_limit, INT_MIN) != pcmk_rc_ok) || (action_limit == 0)) { action_limit = 1; } return action_limit; } static int get_active_cmds(stonith_device_t * device) { int counter = 0; GList *gIter = NULL; GList *gIterNext = NULL; CRM_CHECK(device != NULL, return 0); for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) { async_command_t *cmd = gIter->data; gIterNext = gIter->next; if (cmd->active_on == device) { counter++; } } return counter; } static void fork_cb(int pid, void *user_data) { async_command_t *cmd = (async_command_t *) user_data; stonith_device_t * device = /* in case of a retry we've done the move from activating_on to active_on already */ cmd->activating_on?cmd->activating_on:cmd->active_on; pcmk__assert(device != NULL); crm_debug("Operation '%s' [%d]%s%s using %s now running with %ds timeout", cmd->action, pid, ((cmd->target == NULL)? 
"" : " targeting "), pcmk__s(cmd->target, ""), device->id, cmd->timeout); cmd->active_on = device; cmd->activating_on = NULL; } static int get_agent_metadata_cb(gpointer data) { stonith_device_t *device = data; guint period_ms; switch (get_agent_metadata(device->agent, &device->agent_metadata)) { case pcmk_rc_ok: if (device->agent_metadata) { read_action_metadata(device); stonith__device_parameter_flags(&(device->flags), device->id, device->agent_metadata); } return G_SOURCE_REMOVE; case EAGAIN: period_ms = pcmk__mainloop_timer_get_period(device->timer); if (period_ms < 160 * 1000) { mainloop_timer_set_period(device->timer, 2 * period_ms); } return G_SOURCE_CONTINUE; default: return G_SOURCE_REMOVE; } } /*! * \internal * \brief Call a command's action callback for an internal (not library) result * * \param[in,out] cmd Command to report result for * \param[in] execution_status Execution status to use for result * \param[in] exit_status Exit status to use for result * \param[in] exit_reason Exit reason to use for result */ static void report_internal_result(async_command_t *cmd, int exit_status, int execution_status, const char *exit_reason) { pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; pcmk__set_result(&result, exit_status, execution_status, exit_reason); cmd->done_cb(0, &result, cmd); pcmk__reset_result(&result); } static gboolean stonith_device_execute(stonith_device_t * device) { int exec_rc = 0; const char *action_str = NULL; const char *host_arg = NULL; async_command_t *cmd = NULL; stonith_action_t *action = NULL; int active_cmds = 0; int action_limit = 0; GList *gIter = NULL; GList *gIterNext = NULL; CRM_CHECK(device != NULL, return FALSE); active_cmds = get_active_cmds(device); action_limit = get_action_limit(device); if (action_limit > -1 && active_cmds >= action_limit) { crm_trace("%s is over its action limit of %d (%u active action%s)", device->id, action_limit, active_cmds, pcmk__plural_s(active_cmds)); return TRUE; } for (gIter = device->pending_ops; gIter != NULL; gIter = gIterNext) { async_command_t *pending_op = gIter->data; gIterNext = gIter->next; if (pending_op && pending_op->delay_id) { crm_trace("Operation '%s'%s%s using %s was asked to run too early, " "waiting for start delay of %ds", pending_op->action, ((pending_op->target == NULL)? 
"" : " targeting "), pcmk__s(pending_op->target, ""), device->id, pending_op->start_delay); continue; } device->pending_ops = g_list_remove_link(device->pending_ops, gIter); g_list_free_1(gIter); cmd = pending_op; break; } if (cmd == NULL) { crm_trace("No actions using %s are needed", device->id); return TRUE; } if (pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { if (pcmk__is_fencing_action(cmd->action)) { if (node_does_watchdog_fencing(fenced_get_local_node())) { pcmk__panic("Watchdog self-fencing required"); goto done; } } else { crm_info("Faking success for %s watchdog operation", cmd->action); report_internal_result(cmd, CRM_EX_OK, PCMK_EXEC_DONE, NULL); goto done; } } #if PCMK__ENABLE_CIBSECRETS exec_rc = pcmk__substitute_secrets(device->id, device->params); if (exec_rc != pcmk_rc_ok) { if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) { crm_info("Proceeding with stop operation for %s " "despite being unable to load CIB secrets (%s)", device->id, pcmk_rc_str(exec_rc)); } else { crm_err("Considering %s unconfigured " "because unable to load CIB secrets: %s", device->id, pcmk_rc_str(exec_rc)); report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_SECRETS, "Failed to get CIB secrets"); goto done; } } #endif action_str = cmd->action; if (pcmk__str_eq(cmd->action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk_is_set(device->flags, st_device_supports_reboot)) { crm_notice("Remapping 'reboot' action%s%s using %s to 'off' " "because agent '%s' does not support reboot", ((cmd->target == NULL)? "" : " targeting "), pcmk__s(cmd->target, ""), device->id, device->agent); action_str = PCMK_ACTION_OFF; } if (pcmk_is_set(device->flags, st_device_supports_parameter_port)) { host_arg = "port"; } else if (pcmk_is_set(device->flags, st_device_supports_parameter_plug)) { host_arg = "plug"; } action = stonith__action_create(device->agent, action_str, cmd->target, cmd->target_nodeid, cmd->timeout, device->params, device->aliases, host_arg); /* for async exec, exec_rc is negative for early error exit otherwise handling of success/errors is done via callbacks */ cmd->activating_on = device; exec_rc = stonith__execute_async(action, (void *)cmd, cmd->done_cb, fork_cb); if (exec_rc < 0) { cmd->activating_on = NULL; cmd->done_cb(0, stonith__action_result(action), cmd); stonith__destroy_action(action); } done: /* Device might get triggered to work by multiple fencing commands * simultaneously. Trigger the device again to make sure any * remaining concurrent commands get executed. 
*/ if (device->pending_ops) { mainloop_set_trigger(device->work); } return TRUE; } static gboolean stonith_device_dispatch(gpointer user_data) { return stonith_device_execute(user_data); } static gboolean start_delay_helper(gpointer data) { async_command_t *cmd = data; stonith_device_t *device = cmd_device(cmd); cmd->delay_id = 0; if (device) { mainloop_set_trigger(device->work); } return FALSE; } static void schedule_stonith_command(async_command_t * cmd, stonith_device_t * device) { int delay_max = 0; int delay_base = 0; int requested_delay = cmd->start_delay; CRM_CHECK(cmd != NULL, return); CRM_CHECK(device != NULL, return); if (cmd->device) { free(cmd->device); } if (device->include_nodeid && (cmd->target != NULL)) { pcmk__node_status_t *node = pcmk__get_node(0, cmd->target, NULL, pcmk__node_search_cluster_member); cmd->target_nodeid = node->cluster_layer_id; } cmd->device = pcmk__str_copy(device->id); cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout); if (cmd->remote_op_id) { crm_debug("Scheduling '%s' action%s%s using %s for remote peer %s " "with op id %.8s and timeout %ds", cmd->action, (cmd->target == NULL)? "" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->origin, cmd->remote_op_id, cmd->timeout); } else { crm_debug("Scheduling '%s' action%s%s using %s for %s with timeout %ds", cmd->action, (cmd->target == NULL)? "" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->client, cmd->timeout); } device->pending_ops = g_list_append(device->pending_ops, cmd); mainloop_set_trigger(device->work); // Value -1 means disable any static/random fencing delays if (requested_delay < 0) { return; } delay_max = get_action_delay_max(device, cmd->action); delay_base = get_action_delay_base(device, cmd->action, cmd->target); if (delay_max == 0) { delay_max = delay_base; } if (delay_max < delay_base) { crm_warn(PCMK_STONITH_DELAY_BASE " (%ds) is larger than " PCMK_STONITH_DELAY_MAX " (%ds) for %s using %s " "(limiting to maximum delay)", delay_base, delay_max, cmd->action, device->id); delay_base = delay_max; } if (delay_max > 0) { // coverity[dontcall] It doesn't matter here if rand() is predictable cmd->start_delay += ((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0) + delay_base; } if (cmd->start_delay > 0) { crm_notice("Delaying '%s' action%s%s using %s for %ds " QB_XS " timeout=%ds requested_delay=%ds base=%ds max=%ds", cmd->action, (cmd->target == NULL)? 
"" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->start_delay, cmd->timeout, requested_delay, delay_base, delay_max); cmd->delay_id = pcmk__create_timer(cmd->start_delay * 1000, start_delay_helper, cmd); } } static void free_device(gpointer data) { GList *gIter = NULL; stonith_device_t *device = data; g_hash_table_destroy(device->params); g_hash_table_destroy(device->aliases); for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) { async_command_t *cmd = gIter->data; crm_warn("Removal of device '%s' purged operation '%s'", device->id, cmd->action); report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Device was removed before action could be executed"); } g_list_free(device->pending_ops); g_list_free_full(device->targets, free); if (device->timer) { mainloop_timer_stop(device->timer); mainloop_timer_del(device->timer); } mainloop_destroy_trigger(device->work); pcmk__xml_free(device->agent_metadata); free(device->namespace); if (device->on_target_actions != NULL) { g_string_free(device->on_target_actions, TRUE); } free(device->agent); free(device->id); free(device); } void free_device_list(void) { if (device_list != NULL) { g_hash_table_destroy(device_list); device_list = NULL; } } void init_device_list(void) { if (device_list == NULL) { device_list = pcmk__strkey_table(NULL, free_device); } } static GHashTable * build_port_aliases(const char *hostmap, GList ** targets) { char *name = NULL; int last = 0, lpc = 0, max = 0, added = 0; GHashTable *aliases = pcmk__strikey_table(free, free); if (hostmap == NULL) { return aliases; } max = strlen(hostmap); for (; lpc <= max; lpc++) { switch (hostmap[lpc]) { /* Skip escaped chars */ case '\\': lpc++; break; /* Assignment chars */ case '=': case ':': if (lpc > last) { free(name); name = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(name, hostmap + last, lpc - last); } last = lpc + 1; break; /* Delimeter chars */ /* case ',': Potentially used to specify multiple ports */ case 0: case ';': case ' ': case '\t': if (name) { char *value = NULL; int k = 0; value = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(value, hostmap + last, lpc - last); for (int i = 0; value[i] != '\0'; i++) { if (value[i] != '\\') { value[k++] = value[i]; } } value[k] = '\0'; crm_debug("Adding alias '%s'='%s'", name, value); g_hash_table_replace(aliases, name, value); if (targets) { *targets = g_list_append(*targets, pcmk__str_copy(value)); } value = NULL; name = NULL; added++; } else if (lpc > last) { crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last); } last = lpc + 1; break; } if (hostmap[lpc] == 0) { break; } } if (added == 0) { crm_info("No host mappings detected in '%s'", hostmap); } free(name); return aliases; } GHashTable *metadata_cache = NULL; void free_metadata_cache(void) { if (metadata_cache != NULL) { g_hash_table_destroy(metadata_cache); metadata_cache = NULL; } } static void init_metadata_cache(void) { if (metadata_cache == NULL) { metadata_cache = pcmk__strkey_table(free, free); } } int get_agent_metadata(const char *agent, xmlNode ** metadata) { char *buffer = NULL; if (metadata == NULL) { return EINVAL; } *metadata = NULL; if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT_INTERNAL, pcmk__str_none)) { return pcmk_rc_ok; } init_metadata_cache(); buffer = g_hash_table_lookup(metadata_cache, agent); if (buffer == NULL) { stonith_t *st = stonith_api_new(); int rc; if (st == NULL) { crm_warn("Could not get agent meta-data: " "API memory allocation failed"); return EAGAIN; } rc = 
st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10); stonith_api_delete(st); if (rc || !buffer) { crm_err("Could not retrieve metadata for fencing agent %s", agent); return EAGAIN; } g_hash_table_replace(metadata_cache, pcmk__str_copy(agent), buffer); } *metadata = pcmk__xml_parse(buffer); return pcmk_rc_ok; } static gboolean is_nodeid_required(xmlNode * xml) { xmlXPathObject *xpath = NULL; if (!xml) { return FALSE; } xpath = pcmk__xpath_search(xml->doc, "//" PCMK_XE_PARAMETER "[@" PCMK_XA_NAME "='nodeid']"); if (pcmk__xpath_num_results(xpath) == 0) { xmlXPathFreeObject(xpath); return FALSE; } xmlXPathFreeObject(xpath); return TRUE; } static void read_action_metadata(stonith_device_t *device) { xmlXPathObject *xpath = NULL; int max = 0; int lpc = 0; if (device->agent_metadata == NULL) { return; } xpath = pcmk__xpath_search(device->agent_metadata->doc, "//" PCMK_XE_ACTION); max = pcmk__xpath_num_results(xpath); if (max == 0) { xmlXPathFreeObject(xpath); return; } for (lpc = 0; lpc < max; lpc++) { const char *action = NULL; xmlNode *match = pcmk__xpath_result(xpath, lpc); CRM_LOG_ASSERT(match != NULL); if(match == NULL) { continue; }; action = pcmk__xe_get(match, PCMK_XA_NAME); if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_list); } else if (pcmk__str_eq(action, PCMK_ACTION_STATUS, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_status); } else if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) { stonith__set_device_flags(device->flags, device->id, st_device_supports_reboot); } else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { /* PCMK_XA_AUTOMATIC means the cluster will unfence a node when it * joins. * * @COMPAT PCMK__XA_REQUIRED is a deprecated synonym for * PCMK_XA_AUTOMATIC. 
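             *
             * As an illustrative sketch (attribute spellings assumed rather
             * than quoted from this code), agent metadata along the lines of
             *   <action name="on" automatic="1"/>
             * enables automatic unfencing, and the deprecated
             *   <action name="on" required="1"/>
             * form is still honored below.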
*/ if (pcmk__xe_attr_is_true(match, PCMK_XA_AUTOMATIC) || pcmk__xe_attr_is_true(match, PCMK__XA_REQUIRED)) { device->automatic_unfencing = TRUE; } stonith__set_device_flags(device->flags, device->id, st_device_supports_on); } if ((action != NULL) && pcmk__xe_attr_is_true(match, PCMK_XA_ON_TARGET)) { pcmk__add_word(&(device->on_target_actions), 64, action); } } xmlXPathFreeObject(xpath); } static const char * target_list_type(stonith_device_t * dev) { const char *check_type = NULL; check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK); if (check_type == NULL) { if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) { check_type = PCMK_VALUE_STATIC_LIST; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) { check_type = PCMK_VALUE_STATIC_LIST; } else if (pcmk_is_set(dev->flags, st_device_supports_list)) { check_type = PCMK_VALUE_DYNAMIC_LIST; } else if (pcmk_is_set(dev->flags, st_device_supports_status)) { check_type = PCMK_VALUE_STATUS; } else { check_type = PCMK_VALUE_NONE; } } return check_type; } static stonith_device_t * build_device_from_xml(xmlNode *dev) { const char *value; stonith_device_t *device = NULL; - char *agent = crm_element_value_copy(dev, PCMK_XA_AGENT); + char *agent = pcmk__xe_get_copy(dev, PCMK_XA_AGENT); CRM_CHECK(agent != NULL, return device); device = pcmk__assert_alloc(1, sizeof(stonith_device_t)); - device->id = crm_element_value_copy(dev, PCMK_XA_ID); + device->id = pcmk__xe_get_copy(dev, PCMK_XA_ID); device->agent = agent; - device->namespace = crm_element_value_copy(dev, PCMK__XA_NAMESPACE); + device->namespace = pcmk__xe_get_copy(dev, PCMK__XA_NAMESPACE); device->params = xml2list(dev); value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_LIST); if (value) { device->targets = stonith__parse_targets(value); } value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP); device->aliases = build_port_aliases(value, &(device->targets)); value = target_list_type(device); if (!pcmk__str_eq(value, PCMK_VALUE_STATIC_LIST, pcmk__str_casei) && (device->targets != NULL)) { // device->targets is necessary only with PCMK_VALUE_STATIC_LIST g_list_free_full(device->targets, free); device->targets = NULL; } switch (get_agent_metadata(device->agent, &device->agent_metadata)) { case pcmk_rc_ok: if (device->agent_metadata) { read_action_metadata(device); stonith__device_parameter_flags(&(device->flags), device->id, device->agent_metadata); } break; case EAGAIN: if (device->timer == NULL) { device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, TRUE, get_agent_metadata_cb, device); } if (!mainloop_timer_running(device->timer)) { mainloop_timer_start(device->timer); } break; default: break; } value = g_hash_table_lookup(device->params, "nodeid"); if (!value) { device->include_nodeid = is_nodeid_required(device->agent_metadata); } value = pcmk__xe_get(dev, PCMK__XA_RSC_PROVIDES); if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) { device->automatic_unfencing = TRUE; } if (is_action_required(PCMK_ACTION_ON, device)) { crm_info("Fencing device '%s' requires unfencing", device->id); } if (device->on_target_actions != NULL) { crm_info("Fencing device '%s' requires actions (%s) to be executed " "on target", device->id, (const char *) device->on_target_actions->str); } device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device); /* TODO: Hook up priority */ return device; } static void schedule_internal_command(const char *origin, stonith_device_t * device, const char *action, const 
char *target, int timeout, void *internal_user_data, void (*done_cb) (int pid, const pcmk__action_result_t *result, void *user_data)) { async_command_t *cmd = NULL; cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); cmd->id = -1; cmd->default_timeout = timeout ? timeout : 60; cmd->timeout = cmd->default_timeout; cmd->action = pcmk__str_copy(action); cmd->target = pcmk__str_copy(target); cmd->device = pcmk__str_copy(device->id); cmd->origin = pcmk__str_copy(origin); cmd->client = pcmk__str_copy(crm_system_name); cmd->client_name = pcmk__str_copy(crm_system_name); cmd->internal_user_data = internal_user_data; cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */ schedule_stonith_command(cmd, device); } // Fence agent status commands use custom exit status codes enum fence_status_code { fence_status_invalid = -1, fence_status_active = 0, fence_status_unknown = 1, fence_status_inactive = 2, }; static void status_search_cb(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd_device(cmd); gboolean can = FALSE; free_async_command(cmd); if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); if (result->execution_status != PCMK_EXEC_DONE) { crm_warn("Assuming %s cannot fence %s " "because status could not be executed: %s%s%s%s", dev->id, search->host, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? "" : ")")); search_devices_record_result(search, dev->id, FALSE); return; } switch (result->exit_status) { case fence_status_unknown: crm_trace("%s reported it cannot fence %s", dev->id, search->host); break; case fence_status_active: case fence_status_inactive: crm_trace("%s reported it can fence %s", dev->id, search->host); can = TRUE; break; default: crm_warn("Assuming %s cannot fence %s " "(status returned unknown code %d)", dev->id, search->host, result->exit_status); break; } search_devices_record_result(search, dev->id, can); } static void dynamic_list_search_cb(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; stonith_device_t *dev = cmd_device(cmd); gboolean can_fence = FALSE; free_async_command(cmd); /* Host/alias must be in the list output to be eligible to be fenced * * Will cause problems if down'd nodes aren't listed or (for virtual nodes) * if the guest is still listed despite being moved to another machine */ if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); if (pcmk__result_ok(result)) { crm_info("Refreshing target list for %s", dev->id); g_list_free_full(dev->targets, free); dev->targets = stonith__parse_targets(result->action_stdout); dev->targets_age = time(NULL); } else if (dev->targets != NULL) { if (result->execution_status == PCMK_EXEC_DONE) { crm_info("Reusing most recent target list for %s " "because list returned error code %d", dev->id, result->exit_status); } else { crm_info("Reusing most recent target list for %s " "because list could not be executed: %s%s%s%s", dev->id, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? 
"" : ")")); } } else { // We have never successfully executed list if (result->execution_status == PCMK_EXEC_DONE) { crm_warn("Assuming %s cannot fence %s " "because list returned error code %d", dev->id, search->host, result->exit_status); } else { crm_warn("Assuming %s cannot fence %s " "because list could not be executed: %s%s%s%s", dev->id, search->host, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? "" : ")")); } /* Fall back to pcmk_host_check=PCMK_VALUE_STATUS if the user didn't * explicitly specify PCMK_VALUE_DYNAMIC_LIST */ if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK) == NULL) { crm_notice("Switching to pcmk_host_check='status' for %s", dev->id); pcmk__insert_dup(dev->params, PCMK_STONITH_HOST_CHECK, PCMK_VALUE_STATUS); } } if (dev->targets) { const char *alias = g_hash_table_lookup(dev->aliases, search->host); if (!alias) { alias = search->host; } if (pcmk__str_in_list(alias, dev->targets, pcmk__str_casei)) { can_fence = TRUE; } } search_devices_record_result(search, dev->id, can_fence); } /*! * \internal * \brief Returns true if any key in first is not in second or second has a different value for key */ static int device_params_diff(GHashTable *first, GHashTable *second) { char *key = NULL; char *value = NULL; GHashTableIter gIter; g_hash_table_iter_init(&gIter, first); while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) { if(strstr(key, "CRM_meta") == key) { continue; } else if (strcmp(key, PCMK_XA_CRM_FEATURE_SET) == 0) { continue; } else { char *other_value = g_hash_table_lookup(second, key); if (!other_value || !pcmk__str_eq(other_value, value, pcmk__str_casei)) { crm_trace("Different value for %s: %s != %s", key, other_value, value); return 1; } } } return 0; } /*! * \internal * \brief Checks to see if an identical device already exists in the device_list */ static stonith_device_t * device_has_duplicate(const stonith_device_t *device) { stonith_device_t *dup = g_hash_table_lookup(device_list, device->id); if (!dup) { crm_trace("No match for %s", device->id); return NULL; } else if (!pcmk__str_eq(dup->agent, device->agent, pcmk__str_casei)) { crm_trace("Different agent: %s != %s", dup->agent, device->agent); return NULL; } // Use pcmk__digest_operation() here? if (device_params_diff(device->params, dup->params) || device_params_diff(dup->params, device->params)) { return NULL; } crm_trace("Match"); return dup; } int stonith_device_register(xmlNode *dev, gboolean from_cib) { stonith_device_t *dup = NULL; stonith_device_t *device = build_device_from_xml(dev); guint ndevices = 0; int rv = pcmk_ok; CRM_CHECK(device != NULL, return -ENOMEM); /* do we have a watchdog-device? 
*/ if (pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none) || pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) do { if (stonith_watchdog_timeout_ms <= 0) { crm_err("Ignoring watchdog fence device without " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " set."); rv = -ENODEV; /* fall through to cleanup & return */ } else if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { crm_err("Ignoring watchdog fence device with unknown " "agent '%s' unequal '" STONITH_WATCHDOG_AGENT "'.", device->agent?device->agent:""); rv = -ENODEV; /* fall through to cleanup & return */ } else if (!pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { crm_err("Ignoring watchdog fence device " "named %s !='"STONITH_WATCHDOG_ID"'.", device->id?device->id:""); rv = -ENODEV; /* fall through to cleanup & return */ } else { const char *local_node_name = fenced_get_local_node(); if (pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT, pcmk__str_none)) { /* this either has an empty list or the targets configured for watchdog-fencing */ g_list_free_full(stonith_watchdog_targets, free); stonith_watchdog_targets = device->targets; device->targets = NULL; } if (node_does_watchdog_fencing(local_node_name)) { g_list_free_full(device->targets, free); device->targets = stonith__parse_targets(local_node_name); pcmk__insert_dup(device->params, PCMK_STONITH_HOST_LIST, local_node_name); /* proceed as with any other stonith-device */ break; } crm_debug("Skip registration of watchdog fence device on node not in host-list."); /* cleanup and fall through to more cleanup and return */ device->targets = NULL; stonith_device_remove(device->id, from_cib); } free_device(device); return rv; } while (0); dup = device_has_duplicate(device); if (dup) { ndevices = g_hash_table_size(device_list); crm_debug("Device '%s' already in device list (%d active device%s)", device->id, ndevices, pcmk__plural_s(ndevices)); free_device(device); device = dup; dup = g_hash_table_lookup(device_list, device->id); dup->dirty = FALSE; } else { stonith_device_t *old = g_hash_table_lookup(device_list, device->id); if (from_cib && old && old->api_registered) { /* If the cib is writing over an entry that is shared with a stonith client, * copy any pending ops that currently exist on the old entry to the new one. 
* Otherwise the pending ops will be reported as failures */ crm_info("Overwriting existing entry for %s from CIB", device->id); device->pending_ops = old->pending_ops; device->api_registered = TRUE; old->pending_ops = NULL; if (device->pending_ops) { mainloop_set_trigger(device->work); } } g_hash_table_replace(device_list, device->id, device); ndevices = g_hash_table_size(device_list); crm_notice("Added '%s' to device list (%d active device%s)", device->id, ndevices, pcmk__plural_s(ndevices)); } if (from_cib) { device->cib_registered = TRUE; } else { device->api_registered = TRUE; } return pcmk_ok; } void stonith_device_remove(const char *id, bool from_cib) { stonith_device_t *device = g_hash_table_lookup(device_list, id); guint ndevices = 0; if (!device) { ndevices = g_hash_table_size(device_list); crm_info("Device '%s' not found (%d active device%s)", id, ndevices, pcmk__plural_s(ndevices)); return; } if (from_cib) { device->cib_registered = FALSE; } else { device->verified = FALSE; device->api_registered = FALSE; } if (!device->cib_registered && !device->api_registered) { g_hash_table_remove(device_list, id); ndevices = g_hash_table_size(device_list); crm_info("Removed '%s' from device list (%d active device%s)", id, ndevices, pcmk__plural_s(ndevices)); } else { crm_trace("Not removing '%s' from device list (%d active) because " "still registered via:%s%s", id, g_hash_table_size(device_list), (device->cib_registered? " cib" : ""), (device->api_registered? " api" : "")); } } /*! * \internal * \brief Return the number of stonith levels registered for a node * * \param[in] tp Node's topology table entry * * \return Number of non-NULL levels in topology entry * \note This function is used only for log messages. */ static int count_active_levels(const stonith_topology_t *tp) { int lpc = 0; int count = 0; for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) { if (tp->levels[lpc] != NULL) { count++; } } return count; } static void free_topology_entry(gpointer data) { stonith_topology_t *tp = data; int lpc = 0; for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) { if (tp->levels[lpc] != NULL) { g_list_free_full(tp->levels[lpc], free); } } free(tp->target); free(tp->target_value); free(tp->target_pattern); free(tp->target_attribute); free(tp); } void free_topology_list(void) { if (topology != NULL) { g_hash_table_destroy(topology); topology = NULL; } } void init_topology_list(void) { if (topology == NULL) { topology = pcmk__strkey_table(NULL, free_topology_entry); } } char * stonith_level_key(const xmlNode *level, enum fenced_target_by mode) { if (mode == fenced_target_by_unknown) { mode = unpack_level_kind(level); } switch (mode) { case fenced_target_by_name: - return crm_element_value_copy(level, PCMK_XA_TARGET); + return pcmk__xe_get_copy(level, PCMK_XA_TARGET); case fenced_target_by_pattern: - return crm_element_value_copy(level, PCMK_XA_TARGET_PATTERN); + return pcmk__xe_get_copy(level, PCMK_XA_TARGET_PATTERN); case fenced_target_by_attribute: return crm_strdup_printf("%s=%s", pcmk__xe_get(level, PCMK_XA_TARGET_ATTRIBUTE), pcmk__xe_get(level, PCMK_XA_TARGET_VALUE)); default: return crm_strdup_printf("unknown-%s", pcmk__xe_id(level)); } } /*! 
* \internal * \brief Parse target identification from topology level XML * * \param[in] level Topology level XML to parse * * \return How to identify target of \p level */ static enum fenced_target_by unpack_level_kind(const xmlNode *level) { if (pcmk__xe_get(level, PCMK_XA_TARGET) != NULL) { return fenced_target_by_name; } if (pcmk__xe_get(level, PCMK_XA_TARGET_PATTERN) != NULL) { return fenced_target_by_pattern; } if ((pcmk__xe_get(level, PCMK_XA_TARGET_ATTRIBUTE) != NULL) && (pcmk__xe_get(level, PCMK_XA_TARGET_VALUE) != NULL)) { return fenced_target_by_attribute; } return fenced_target_by_unknown; } static stonith_key_value_t * parse_device_list(const char *devices) { int lpc = 0; int max = 0; int last = 0; stonith_key_value_t *output = NULL; if (devices == NULL) { return output; } max = strlen(devices); for (lpc = 0; lpc <= max; lpc++) { if (devices[lpc] == ',' || devices[lpc] == 0) { char *line = strndup(devices + last, lpc - last); output = stonith_key_value_add(output, NULL, line); free(line); last = lpc + 1; } } return output; } /*! * \internal * \brief Unpack essential information from topology request XML * * \param[in] xml Request XML to search * \param[out] mode If not NULL, where to store level kind * \param[out] target If not NULL, where to store representation of target * \param[out] id If not NULL, where to store level number * \param[out] desc If not NULL, where to store log-friendly level description * * \return Topology level XML from within \p xml, or NULL if not found * \note The caller is responsible for freeing \p *target and \p *desc if set. */ static xmlNode * unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target, int *id, char **desc) { enum fenced_target_by local_mode = fenced_target_by_unknown; char *local_target = NULL; int local_id = 0; /* The level element can be the top element or lower. If top level, don't * search by xpath, because it might give multiple hits if the XML is the * CIB. */ if ((xml != NULL) && !pcmk__xe_is(xml, PCMK_XE_FENCING_LEVEL)) { xml = pcmk__xpath_find_one(xml->doc, "//" PCMK_XE_FENCING_LEVEL, LOG_WARNING); } if (xml == NULL) { if (desc != NULL) { *desc = crm_strdup_printf("missing"); } } else { local_mode = unpack_level_kind(xml); local_target = stonith_level_key(xml, local_mode); pcmk__xe_get_int(xml, PCMK_XA_INDEX, &local_id); if (desc != NULL) { *desc = crm_strdup_printf("%s[%d]", local_target, local_id); } } if (mode != NULL) { *mode = local_mode; } if (id != NULL) { *id = local_id; } if (target != NULL) { *target = local_target; } else { free(local_target); } return xml; } /*! * \internal * \brief Register a fencing topology level for a target * * Given an XML request specifying the target name, level index, and device IDs * for the level, this will create an entry for the target in the global topology * table if one does not already exist, then append the specified device IDs to * the entry's device list for the specified level. 
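 *
 * For illustration only (a hypothetical element, with attribute spellings
 * assumed): a level definition resembling
 *   <fencing-level id="fl-node1-1" target="node1" index="1"
 *                  devices="fence_ipmi_node1,fence_pdu1"/>
 * would append fence_ipmi_node1 and fence_pdu1 to level 1 for target node1.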
* * \param[in] msg XML request for STONITH level registration * \param[out] desc If not NULL, set to string representation "TARGET[LEVEL]" * \param[out] result Where to set result of registration */ void fenced_register_level(xmlNode *msg, char **desc, pcmk__action_result_t *result) { int id = 0; xmlNode *level; enum fenced_target_by mode; char *target; stonith_topology_t *tp; stonith_key_value_t *dIter = NULL; stonith_key_value_t *devices = NULL; CRM_CHECK((msg != NULL) && (result != NULL), return); level = unpack_level_request(msg, &mode, &target, &id, desc); if (level == NULL) { fenced_set_protocol_error(result); return; } // Ensure an ID was given (even the client API adds an ID) if (pcmk__str_empty(pcmk__xe_id(level))) { crm_warn("Ignoring registration for topology level without ID"); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Topology level is invalid without ID"); return; } // Ensure a valid target was specified if (mode == fenced_target_by_unknown) { crm_warn("Ignoring registration for topology level '%s' " "without valid target", pcmk__xe_id(level)); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid target for topology level '%s'", pcmk__xe_id(level)); return; } // Ensure level ID is in allowed range if ((id < ST__LEVEL_MIN) || (id > ST__LEVEL_MAX)) { crm_warn("Ignoring topology registration for %s with invalid level %d", target, id); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid level number '%s' for topology level '%s'", pcmk__s(pcmk__xe_get(level, PCMK_XA_INDEX), ""), pcmk__xe_id(level)); return; } /* Find or create topology table entry */ tp = g_hash_table_lookup(topology, target); if (tp == NULL) { tp = pcmk__assert_alloc(1, sizeof(stonith_topology_t)); tp->kind = mode; tp->target = target; - tp->target_value = crm_element_value_copy(level, PCMK_XA_TARGET_VALUE); - tp->target_pattern = crm_element_value_copy(level, - PCMK_XA_TARGET_PATTERN); - tp->target_attribute = crm_element_value_copy(level, - PCMK_XA_TARGET_ATTRIBUTE); + tp->target_value = pcmk__xe_get_copy(level, PCMK_XA_TARGET_VALUE); + tp->target_pattern = pcmk__xe_get_copy(level, PCMK_XA_TARGET_PATTERN); + tp->target_attribute = pcmk__xe_get_copy(level, PCMK_XA_TARGET_ATTRIBUTE); g_hash_table_replace(topology, tp->target, tp); crm_trace("Added %s (%d) to the topology (%d active entries)", target, (int) mode, g_hash_table_size(topology)); } else { free(target); } if (tp->levels[id] != NULL) { crm_info("Adding to the existing %s[%d] topology entry", tp->target, id); } devices = parse_device_list(pcmk__xe_get(level, PCMK_XA_DEVICES)); for (dIter = devices; dIter; dIter = dIter->next) { const char *device = dIter->value; crm_trace("Adding device '%s' for %s[%d]", device, tp->target, id); tp->levels[id] = g_list_append(tp->levels[id], pcmk__str_copy(device)); } stonith_key_value_freeall(devices, 1, 1); { int nlevels = count_active_levels(tp); crm_info("Target %s has %d active fencing level%s", tp->target, nlevels, pcmk__plural_s(nlevels)); } pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } /*! * \internal * \brief Unregister a fencing topology level for a target * * Given an XML request specifying the target name and level index (or 0 for all * levels), this will remove any corresponding entry for the target from the * global topology table. 
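 *
 * For example (illustrative): a request naming target node1 with index 2
 * clears only level 2 for node1, while index 0 removes node1's entire
 * topology entry.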
* * \param[in] msg XML request for STONITH level registration * \param[out] desc If not NULL, set to string representation "TARGET[LEVEL]" * \param[out] result Where to set result of unregistration */ void fenced_unregister_level(xmlNode *msg, char **desc, pcmk__action_result_t *result) { int id = -1; stonith_topology_t *tp; char *target; xmlNode *level = NULL; CRM_CHECK(result != NULL, return); level = unpack_level_request(msg, NULL, &target, &id, desc); if (level == NULL) { fenced_set_protocol_error(result); return; } // Ensure level ID is in allowed range if ((id < 0) || (id >= ST__LEVEL_COUNT)) { crm_warn("Ignoring topology unregistration for %s with invalid level %d", target, id); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid level number '%s' for topology level %s", pcmk__s(pcmk__xe_get(level, PCMK_XA_INDEX), ""), // Client API doesn't add ID to unregistration XML pcmk__s(pcmk__xe_id(level), "")); return; } tp = g_hash_table_lookup(topology, target); if (tp == NULL) { guint nentries = g_hash_table_size(topology); crm_info("No fencing topology found for %s (%d active %s)", target, nentries, pcmk__plural_alt(nentries, "entry", "entries")); } else if (id == 0 && g_hash_table_remove(topology, target)) { guint nentries = g_hash_table_size(topology); crm_info("Removed all fencing topology entries related to %s " "(%d active %s remaining)", target, nentries, pcmk__plural_alt(nentries, "entry", "entries")); } else if (tp->levels[id] != NULL) { guint nlevels; g_list_free_full(tp->levels[id], free); tp->levels[id] = NULL; nlevels = count_active_levels(tp); crm_info("Removed level %d from fencing topology for %s " "(%d active level%s remaining)", id, target, nlevels, pcmk__plural_s(nlevels)); } free(target); pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } static char * list_to_string(GList *list, const char *delim, gboolean terminate_with_delim) { int max = g_list_length(list); size_t delim_len = delim?strlen(delim):0; size_t alloc_size = 1 + (max?((max-1+(terminate_with_delim?1:0))*delim_len):0); char *rv; GList *gIter; char *pos = NULL; const char *lead_delim = ""; for (gIter = list; gIter != NULL; gIter = gIter->next) { const char *value = (const char *) gIter->data; alloc_size += strlen(value); } rv = pcmk__assert_alloc(alloc_size, sizeof(char)); pos = rv; for (gIter = list; gIter != NULL; gIter = gIter->next) { const char *value = (const char *) gIter->data; pos = &pos[sprintf(pos, "%s%s", lead_delim, value)]; lead_delim = delim; } if (max && terminate_with_delim) { sprintf(pos, "%s", delim); } return rv; } /*! * \internal * \brief Execute a fence agent action directly (and asynchronously) * * Handle a STONITH_OP_EXEC API message by scheduling a requested agent action * directly on a specified device. Only list, monitor, and status actions are * expected to use this call, though it should work with any agent command. * * \param[in] msg Request XML specifying action * \param[out] result Where to store result of action * * \note If the action is monitor, the device must be registered via the API * (CIB registration is not sufficient), because monitor should not be * possible unless the device is "started" (API registered). 
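 *
 *       Also note (a reading of the code below, not a separate guarantee):
 *       on success this function only schedules the command; \p result is
 *       set to PCMK_EXEC_PENDING, and the agent's real outcome is reported
 *       later through the command's done callback.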
*/ static void execute_agent_action(xmlNode *msg, pcmk__action_result_t *result) { xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//" PCMK__XE_ST_DEVICE_ID, LOG_ERR); xmlNode *op = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_ERR); const char *id = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); const char *action = pcmk__xe_get(op, PCMK__XA_ST_DEVICE_ACTION); async_command_t *cmd = NULL; stonith_device_t *device = NULL; if ((id == NULL) || (action == NULL)) { crm_info("Malformed API action request: device %s, action %s", (id? id : "not specified"), (action? action : "not specified")); fenced_set_protocol_error(result); return; } if (pcmk__str_eq(id, STONITH_WATCHDOG_ID, pcmk__str_none)) { // Watchdog agent actions are implemented internally if (stonith_watchdog_timeout_ms <= 0) { pcmk__set_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Watchdog fence device not configured"); return; } else if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_result_output(result, list_to_string(stonith_watchdog_targets, "\n", TRUE), NULL); return; } else if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return; } } device = g_hash_table_lookup(device_list, id); if (device == NULL) { crm_info("Ignoring API '%s' action request because device %s not found", action, id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "'%s' not found", id); return; } else if (!device->api_registered && (strcmp(action, PCMK_ACTION_MONITOR) == 0)) { // Monitors may run only on "started" (API-registered) devices crm_info("Ignoring API '%s' action request because device %s not active", action, id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "'%s' not active", id); return; } cmd = create_async_command(msg); if (cmd == NULL) { crm_log_xml_warn(msg, "invalid"); fenced_set_protocol_error(result); return; } schedule_stonith_command(cmd, device); pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence) { search->replies_received++; if (can_fence && device) { if (search->support_action_only != st_device_supports_none) { stonith_device_t *dev = g_hash_table_lookup(device_list, device); if (dev && !pcmk_is_set(dev->flags, search->support_action_only)) { return; } } search->capable = g_list_append(search->capable, pcmk__str_copy(device)); } if (search->replies_needed == search->replies_received) { guint ndevices = g_list_length(search->capable); crm_debug("Search found %d device%s that can perform '%s' targeting %s", ndevices, pcmk__plural_s(ndevices), (search->action? search->action : "unknown action"), (search->host? search->host : "any node")); search->callback(search->capable, search->user_data); free(search->host); free(search->action); free(search); } } /*! 
* \internal * \brief Check whether the local host is allowed to execute a fencing action * * \param[in] device Fence device to check * \param[in] action Fence action to check * \param[in] target Hostname of fence target * \param[in] allow_self Whether self-fencing is allowed for this operation * * \return TRUE if local host is allowed to execute action, FALSE otherwise */ static gboolean localhost_is_eligible(const stonith_device_t *device, const char *action, const char *target, gboolean allow_self) { gboolean localhost_is_target = pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei); if ((device != NULL) && (action != NULL) && (device->on_target_actions != NULL) && (strstr((const char*) device->on_target_actions->str, action) != NULL)) { if (!localhost_is_target) { crm_trace("Operation '%s' using %s can only be executed for local " "host, not %s", action, device->id, target); return FALSE; } } else if (localhost_is_target && !allow_self) { crm_trace("'%s' operation does not support self-fencing", action); return FALSE; } return TRUE; } /*! * \internal * \brief Check if local node is allowed to execute (possibly remapped) action * * \param[in] device Fence device to check * \param[in] action Fence action to check * \param[in] target Node name of fence target * \param[in] allow_self Whether self-fencing is allowed for this operation * * \return true if local node is allowed to execute \p action or any actions it * might be remapped to, otherwise false */ static bool localhost_is_eligible_with_remap(const stonith_device_t *device, const char *action, const char *target, gboolean allow_self) { // Check exact action if (localhost_is_eligible(device, action, target, allow_self)) { return true; } // Check potential remaps if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* "reboot" might get remapped to "off" then "on", so even if reboot is * disallowed, return true if either of those is allowed. We'll report * the disallowed actions with the results. We never allow self-fencing * for remapped "on" actions because the target is off at that point. */ if (localhost_is_eligible(device, PCMK_ACTION_OFF, target, allow_self) || localhost_is_eligible(device, PCMK_ACTION_ON, target, FALSE)) { return true; } } return false; } /*! * \internal * \brief Check whether we can use a device's cached target list * * \param[in] dev Fencing device to check * * \return \c true if \p dev cached its targets less than a minute ago, * otherwise \c false */ static inline bool can_use_target_cache(const stonith_device_t *dev) { return (dev->targets != NULL) && (time(NULL) < (dev->targets_age + 60)); } static void can_fence_host_with_device(stonith_device_t *dev, struct device_search_s *search) { gboolean can = FALSE; const char *check_type = "Internal bug"; const char *target = NULL; const char *alias = NULL; const char *dev_id = "Unspecified device"; const char *action = (search == NULL)? 
NULL : search->action; CRM_CHECK((dev != NULL) && (action != NULL), goto search_report_results); if (dev->id != NULL) { dev_id = dev->id; } target = search->host; if (target == NULL) { can = TRUE; check_type = "No target"; goto search_report_results; } /* Answer immediately if the device does not support the action * or the local node is not allowed to perform it */ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none) && !pcmk_is_set(dev->flags, st_device_supports_on)) { check_type = "Agent does not support 'on'"; goto search_report_results; } else if (!localhost_is_eligible_with_remap(dev, action, target, search->allow_self)) { check_type = "This node is not allowed to execute action"; goto search_report_results; } // Check eligibility as specified by pcmk_host_check check_type = target_list_type(dev); alias = g_hash_table_lookup(dev->aliases, target); if (pcmk__str_eq(check_type, PCMK_VALUE_NONE, pcmk__str_casei)) { can = TRUE; } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATIC_LIST, pcmk__str_casei)) { if (pcmk__str_in_list(target, dev->targets, pcmk__str_casei)) { can = TRUE; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP) && g_hash_table_lookup(dev->aliases, target)) { can = TRUE; } } else if (pcmk__str_eq(check_type, PCMK_VALUE_DYNAMIC_LIST, pcmk__str_casei)) { if (!can_use_target_cache(dev)) { int device_timeout = get_action_timeout(dev, PCMK_ACTION_LIST, search->per_device_timeout); if (device_timeout > search->per_device_timeout) { crm_notice("Since the pcmk_list_timeout (%ds) parameter of %s " "is larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), timeout may occur", device_timeout, dev_id, search->per_device_timeout); } crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); schedule_internal_command(__func__, dev, PCMK_ACTION_LIST, NULL, search->per_device_timeout, search, dynamic_list_search_cb); /* we'll respond to this search request async in the cb */ return; } if (pcmk__str_in_list(((alias == NULL)? target : alias), dev->targets, pcmk__str_casei)) { can = TRUE; } } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATUS, pcmk__str_casei)) { int device_timeout = get_action_timeout(dev, check_type, search->per_device_timeout); if (device_timeout > search->per_device_timeout) { crm_notice("Since the pcmk_status_timeout (%ds) parameter of %s is " "larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), " "timeout may occur", device_timeout, dev_id, search->per_device_timeout); } crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); schedule_internal_command(__func__, dev, PCMK_ACTION_STATUS, target, search->per_device_timeout, search, status_search_cb); /* we'll respond to this search request async in the cb */ return; } else { crm_err("Invalid value for " PCMK_STONITH_HOST_CHECK ": %s", check_type); check_type = "Invalid " PCMK_STONITH_HOST_CHECK; } search_report_results: crm_info("%s is%s eligible to fence (%s) %s%s%s%s: %s", dev_id, (can? "" : " not"), pcmk__s(action, "unspecified action"), pcmk__s(target, "unspecified target"), (alias == NULL)? "" : " (as '", pcmk__s(alias, ""), (alias == NULL)? "" : "')", check_type); search_devices_record_result(search, ((dev == NULL)? 
NULL : dev_id), can); } static void search_devices(gpointer key, gpointer value, gpointer user_data) { stonith_device_t *dev = value; struct device_search_s *search = user_data; can_fence_host_with_device(dev, search); } #define DEFAULT_QUERY_TIMEOUT 20 static void get_capable_devices(const char *host, const char *action, int timeout, bool allow_self, void *user_data, void (*callback) (GList * devices, void *user_data), uint32_t support_action_only) { struct device_search_s *search; guint ndevices = g_hash_table_size(device_list); if (ndevices == 0) { callback(NULL, user_data); return; } search = pcmk__assert_alloc(1, sizeof(struct device_search_s)); search->host = pcmk__str_copy(host); search->action = pcmk__str_copy(action); search->per_device_timeout = timeout; search->allow_self = allow_self; search->callback = callback; search->user_data = user_data; search->support_action_only = support_action_only; /* We are guaranteed this many replies, even if a device is * unregistered while the search is in progress. */ search->replies_needed = ndevices; crm_debug("Searching %d device%s to see which can execute '%s' targeting %s", ndevices, pcmk__plural_s(ndevices), (search->action? search->action : "unknown action"), (search->host? search->host : "any node")); g_hash_table_foreach(device_list, search_devices, search); } struct st_query_data { xmlNode *reply; char *remote_peer; char *client_id; char *target; char *action; int call_options; }; /*! * \internal * \brief Add action-specific attributes to query reply XML * * \param[in,out] xml XML to add attributes to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target */ static void add_action_specific_attributes(xmlNode *xml, const char *action, const stonith_device_t *device, const char *target) { int action_specific_timeout; int delay_max; int delay_base; CRM_CHECK(xml && action && device, return); // PCMK__XA_ST_REQUIRED is currently used only for unfencing if (is_action_required(action, device)) { crm_trace("Action '%s' is required using %s", action, device->id); crm_xml_add_int(xml, PCMK__XA_ST_REQUIRED, 1); } // pcmk__timeout if configured action_specific_timeout = get_action_timeout(device, action, 0); if (action_specific_timeout) { crm_trace("Action '%s' has timeout %ds using %s", action, action_specific_timeout, device->id); crm_xml_add_int(xml, PCMK__XA_ST_ACTION_TIMEOUT, action_specific_timeout); } delay_max = get_action_delay_max(device, action); if (delay_max > 0) { crm_trace("Action '%s' has maximum random delay %ds using %s", action, delay_max, device->id); crm_xml_add_int(xml, PCMK__XA_ST_DELAY_MAX, delay_max); } delay_base = get_action_delay_base(device, action, target); if (delay_base > 0) { crm_xml_add_int(xml, PCMK__XA_ST_DELAY_BASE, delay_base); } if ((delay_max > 0) && (delay_base == 0)) { crm_trace("Action '%s' has maximum random delay %ds using %s", action, delay_max, device->id); } else if ((delay_max == 0) && (delay_base > 0)) { crm_trace("Action '%s' has a static delay of %ds using %s", action, delay_base, device->id); } else if ((delay_max > 0) && (delay_base > 0)) { crm_trace("Action '%s' has a minimum delay of %ds and a randomly chosen " "maximum delay of %ds using %s", action, delay_base, delay_max, device->id); } } /*! 
* \internal * \brief Add "disallowed" attribute to query reply XML if appropriate * * \param[in,out] xml XML to add attribute to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target * \param[in] allow_self Whether self-fencing is allowed */ static void add_disallowed(xmlNode *xml, const char *action, const stonith_device_t *device, const char *target, gboolean allow_self) { if (!localhost_is_eligible(device, action, target, allow_self)) { crm_trace("Action '%s' using %s is disallowed for local host", action, device->id); pcmk__xe_set_bool_attr(xml, PCMK__XA_ST_ACTION_DISALLOWED, true); } } /*! * \internal * \brief Add child element with action-specific values to query reply XML * * \param[in,out] xml XML to add attribute to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target * \param[in] allow_self Whether self-fencing is allowed */ static void add_action_reply(xmlNode *xml, const char *action, const stonith_device_t *device, const char *target, gboolean allow_self) { xmlNode *child = pcmk__xe_create(xml, PCMK__XE_ST_DEVICE_ACTION); crm_xml_add(child, PCMK_XA_ID, action); add_action_specific_attributes(child, action, device, target); add_disallowed(child, action, device, target, allow_self); } /*! * \internal * \brief Send a reply to a CPG peer or IPC client * * \param[in] reply XML reply to send * \param[in] call_options Send synchronously if st_opt_sync_call is set * \param[in] remote_peer If not NULL, name of peer node to send CPG reply * \param[in,out] client If not NULL, client to send IPC reply */ static void stonith_send_reply(const xmlNode *reply, int call_options, const char *remote_peer, pcmk__client_t *client) { CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)), return); if (remote_peer == NULL) { do_local_reply(reply, client, call_options); } else { const pcmk__node_status_t *node = pcmk__get_node(0, remote_peer, NULL, pcmk__node_search_cluster_member); pcmk__cluster_send_message(node, pcmk_ipc_fenced, reply); } } static void stonith_query_capable_device_cb(GList * devices, void *user_data) { struct st_query_data *query = user_data; int available_devices = 0; xmlNode *wrapper = NULL; xmlNode *list = NULL; GList *lpc = NULL; pcmk__client_t *client = NULL; if (query->client_id != NULL) { client = pcmk__find_client_by_id(query->client_id); if ((client == NULL) && (query->remote_peer == NULL)) { crm_trace("Skipping reply to %s: no longer a client", query->client_id); goto done; } } // Pack the results into XML wrapper = pcmk__xe_create(query->reply, PCMK__XE_ST_CALLDATA); list = pcmk__xe_create(wrapper, __func__); crm_xml_add(list, PCMK__XA_ST_TARGET, query->target); for (lpc = devices; lpc != NULL; lpc = lpc->next) { stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data); const char *action = query->action; xmlNode *dev = NULL; if (!device) { /* It is possible the device got unregistered while * determining who can fence the target */ continue; } available_devices++; dev = pcmk__xe_create(list, PCMK__XE_ST_DEVICE_ID); crm_xml_add(dev, PCMK_XA_ID, device->id); crm_xml_add(dev, PCMK__XA_NAMESPACE, device->namespace); crm_xml_add(dev, PCMK_XA_AGENT, device->agent); // Has had successful monitor, list, or status on this node crm_xml_add_int(dev, PCMK__XA_ST_MONITOR_VERIFIED, device->verified); crm_xml_add_int(dev, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, device->flags); /* If the originating fencer wants to reboot the node, and we have a * capable device that 
doesn't support "reboot", remap to "off" instead. */ if (!pcmk_is_set(device->flags, st_device_supports_reboot) && pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { crm_trace("%s doesn't support reboot, using values for off instead", device->id); action = PCMK_ACTION_OFF; } /* Add action-specific values if available */ add_action_specific_attributes(dev, action, device, query->target); if (pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A "reboot" *might* get remapped to "off" then "on", so after * sending the "reboot"-specific values in the main element, we add * sub-elements for "off" and "on" values. * * We short-circuited earlier if "reboot", "off" and "on" are all * disallowed for the local host. However if only one or two are * disallowed, we send back the results and mark which ones are * disallowed. If "reboot" is disallowed, this might cause problems * with older fencer versions, which won't check for it. Older * versions will ignore "off" and "on", so they are not a problem. */ add_disallowed(dev, action, device, query->target, pcmk_is_set(query->call_options, st_opt_allow_self_fencing)); add_action_reply(dev, PCMK_ACTION_OFF, device, query->target, pcmk_is_set(query->call_options, st_opt_allow_self_fencing)); add_action_reply(dev, PCMK_ACTION_ON, device, query->target, FALSE); } /* A query without a target wants device parameters */ if (query->target == NULL) { xmlNode *attrs = pcmk__xe_create(dev, PCMK__XE_ATTRIBUTES); g_hash_table_foreach(device->params, hash2field, attrs); } } crm_xml_add_int(list, PCMK__XA_ST_AVAILABLE_DEVICES, available_devices); if (query->target) { crm_debug("Found %d matching device%s for target '%s'", available_devices, pcmk__plural_s(available_devices), query->target); } else { crm_debug("%d device%s installed", available_devices, pcmk__plural_s(available_devices)); } crm_log_xml_trace(list, "query-result"); stonith_send_reply(query->reply, query->call_options, query->remote_peer, client); done: pcmk__xml_free(query->reply); free(query->remote_peer); free(query->client_id); free(query->target); free(query->action); free(query); g_list_free_full(devices, free); } /*! * \internal * \brief Log the result of an asynchronous command * * \param[in] cmd Command the result is for * \param[in] result Result of command * \param[in] pid Process ID of command, if available * \param[in] next Alternate device that will be tried if command failed * \param[in] op_merged Whether this command was merged with an earlier one */ static void log_async_result(const async_command_t *cmd, const pcmk__action_result_t *result, int pid, const char *next, bool op_merged) { int log_level = LOG_ERR; int output_log_level = LOG_NEVER; guint devices_remaining = g_list_length(cmd->next_device_iter); GString *msg = g_string_sized_new(80); // Reasonable starting size // Choose log levels appropriately if we have a result if (pcmk__result_ok(result)) { log_level = (cmd->target == NULL)? LOG_DEBUG : LOG_NOTICE; if ((result->action_stdout != NULL) && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA, pcmk__str_none)) { output_log_level = LOG_DEBUG; } next = NULL; } else { log_level = (cmd->target == NULL)? 
LOG_NOTICE : LOG_ERR; if ((result->action_stdout != NULL) && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA, pcmk__str_none)) { output_log_level = LOG_WARNING; } } // Build the log message piece by piece pcmk__g_strcat(msg, "Operation '", cmd->action, "' ", NULL); if (pid != 0) { g_string_append_printf(msg, "[%d] ", pid); } if (cmd->target != NULL) { pcmk__g_strcat(msg, "targeting ", cmd->target, " ", NULL); } if (cmd->device != NULL) { pcmk__g_strcat(msg, "using ", cmd->device, " ", NULL); } // Add exit status or execution status as appropriate if (result->execution_status == PCMK_EXEC_DONE) { g_string_append_printf(msg, "returned %d", result->exit_status); } else { pcmk__g_strcat(msg, "could not be executed: ", pcmk_exec_status_str(result->execution_status), NULL); } // Add exit reason and next device if appropriate if (result->exit_reason != NULL) { pcmk__g_strcat(msg, " (", result->exit_reason, ")", NULL); } if (next != NULL) { pcmk__g_strcat(msg, ", retrying with ", next, NULL); } if (devices_remaining > 0) { g_string_append_printf(msg, " (%u device%s remaining)", (unsigned int) devices_remaining, pcmk__plural_s(devices_remaining)); } g_string_append_printf(msg, " " QB_XS " %scall %d from %s", (op_merged? "merged " : ""), cmd->id, cmd->client_name); // Log the result do_crm_log(log_level, "%s", msg->str); g_string_free(msg, TRUE); // Log the output (which may have multiple lines), if appropriate if (output_log_level != LOG_NEVER) { char *prefix = crm_strdup_printf("%s[%d]", cmd->device, pid); crm_log_output(output_log_level, prefix, result->action_stdout); free(prefix); } } /*! * \internal * \brief Reply to requester after asynchronous command completion * * \param[in] cmd Command that completed * \param[in] result Result of command * \param[in] pid Process ID of command, if available * \param[in] merged If true, command was merged with another, not executed */ static void send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result, int pid, bool merged) { xmlNode *reply = NULL; pcmk__client_t *client = NULL; CRM_CHECK((cmd != NULL) && (result != NULL), return); log_async_result(cmd, result, pid, NULL, merged); if (cmd->client != NULL) { client = pcmk__find_client_by_id(cmd->client); if ((client == NULL) && (cmd->origin == NULL)) { crm_trace("Skipping reply to %s: no longer a client", cmd->client); return; } } reply = construct_async_reply(cmd, result); if (merged) { pcmk__xe_set_bool_attr(reply, PCMK__XA_ST_OP_MERGED, true); } if (pcmk__is_fencing_action(cmd->action) && pcmk__str_eq(cmd->origin, cmd->target, pcmk__str_casei)) { /* The target was also the originator, so broadcast the result on its * behalf (since it will be unable to). */ crm_trace("Broadcast '%s' result for %s (target was also originator)", cmd->action, cmd->target); crm_xml_add(reply, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); crm_xml_add(reply, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, reply); } else { // Reply only to the originator stonith_send_reply(reply, cmd->options, cmd->origin, client); } crm_log_xml_trace(reply, "Reply"); pcmk__xml_free(reply); } static void cancel_stonith_command(async_command_t * cmd) { stonith_device_t *device = cmd_device(cmd); if (device) { crm_trace("Cancel scheduled '%s' action using %s", cmd->action, device->id); device->pending_ops = g_list_remove(device->pending_ops, cmd); } } /*! 
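 *
 * As a sketch of the matching rule implemented below, two pending commands
 * a and b (both async_command_t pointers) are treated as duplicates when they
 * come from different clients but agree on target, action, and device,
 * roughly the predicate:
 *
 *   !pcmk__str_eq(a->client, b->client, pcmk__str_casei)
 *       && pcmk__str_eq(a->target, b->target, pcmk__str_casei)
 *       && pcmk__str_eq(a->action, b->action, pcmk__str_none)
 *       && pcmk__str_eq(a->device, b->device, pcmk__str_casei)
 *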
* \internal * \brief Cancel and reply to any duplicates of a just-completed operation * * Check whether any fencing operations are scheduled to do the same thing as * one that just succeeded. If so, rather than performing the same operation * twice, return the result of this operation for all matching pending commands. * * \param[in,out] cmd Fencing operation that just succeeded * \param[in] result Result of \p cmd * \param[in] pid If nonzero, process ID of agent invocation (for logs) * * \note Duplicate merging will do the right thing for either type of remapped * reboot. If the executing fencer remapped an unsupported reboot to off, * then cmd->action will be "reboot" and will be merged with any other * reboot requests. If the originating fencer remapped a topology reboot * to off then on, we will get here once with cmd->action "off" and once * with "on", and they will be merged separately with similar requests. */ static void reply_to_duplicates(async_command_t *cmd, const pcmk__action_result_t *result, int pid) { GList *next = NULL; for (GList *iter = cmd_list; iter != NULL; iter = next) { async_command_t *cmd_other = iter->data; next = iter->next; // We might delete this entry, so grab next now if (cmd == cmd_other) { continue; } /* A pending operation matches if: * 1. The client connections are different. * 2. The target is the same. * 3. The fencing action is the same. * 4. The device scheduled to execute the action is the same. */ if (pcmk__str_eq(cmd->client, cmd_other->client, pcmk__str_casei) || !pcmk__str_eq(cmd->target, cmd_other->target, pcmk__str_casei) || !pcmk__str_eq(cmd->action, cmd_other->action, pcmk__str_none) || !pcmk__str_eq(cmd->device, cmd_other->device, pcmk__str_casei)) { continue; } crm_notice("Merging fencing action '%s'%s%s originating from " "client %s with identical fencing request from client %s", cmd_other->action, (cmd_other->target == NULL)? "" : " targeting ", pcmk__s(cmd_other->target, ""), cmd_other->client_name, cmd->client_name); // Stop tracking the duplicate, send its result, and cancel it cmd_list = g_list_remove_link(cmd_list, iter); send_async_reply(cmd_other, result, pid, true); cancel_stonith_command(cmd_other); free_async_command(cmd_other); g_list_free_1(iter); } } /*! * \internal * \brief Return the next required device (if any) for an operation * * \param[in,out] cmd Fencing operation that just succeeded * * \return Next device required for action if any, otherwise NULL */ static stonith_device_t * next_required_device(async_command_t *cmd) { for (GList *iter = cmd->next_device_iter; iter != NULL; iter = iter->next) { stonith_device_t *next_device = g_hash_table_lookup(device_list, iter->data); if (is_action_required(cmd->action, next_device)) { /* This is only called for successful actions, so it's OK to skip * non-required devices. 
*/ cmd->next_device_iter = iter->next; return next_device; } } return NULL; } static void st_child_done(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; stonith_device_t *device = NULL; stonith_device_t *next_device = NULL; CRM_CHECK(cmd != NULL, return); device = cmd_device(cmd); cmd->active_on = NULL; /* The device is ready to do something else now */ if (device) { if (!device->verified && pcmk__result_ok(result) && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_LIST, PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS, NULL)) { device->verified = TRUE; } mainloop_set_trigger(device->work); } if (pcmk__result_ok(result)) { next_device = next_required_device(cmd); } else if ((cmd->next_device_iter != NULL) && !is_action_required(cmd->action, device)) { /* if this device didn't work out, see if there are any others we can try. * if the failed device was 'required', we can't pick another device. */ next_device = g_hash_table_lookup(device_list, cmd->next_device_iter->data); cmd->next_device_iter = cmd->next_device_iter->next; } if (next_device == NULL) { send_async_reply(cmd, result, pid, false); if (pcmk__result_ok(result)) { reply_to_duplicates(cmd, result, pid); } free_async_command(cmd); } else { // This operation requires more fencing log_async_result(cmd, result, pid, next_device->id, false); schedule_stonith_command(cmd, next_device); } } static gint sort_device_priority(gconstpointer a, gconstpointer b) { const stonith_device_t *dev_a = a; const stonith_device_t *dev_b = b; if (dev_a->priority > dev_b->priority) { return -1; } else if (dev_a->priority < dev_b->priority) { return 1; } return 0; } static void stonith_fence_get_devices_cb(GList * devices, void *user_data) { async_command_t *cmd = user_data; stonith_device_t *device = NULL; guint ndevices = g_list_length(devices); crm_info("Found %d matching device%s for target '%s'", ndevices, pcmk__plural_s(ndevices), cmd->target); if (devices != NULL) { /* Order based on priority */ devices = g_list_sort(devices, sort_device_priority); device = g_hash_table_lookup(device_list, devices->data); } if (device == NULL) { // No device found pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; pcmk__format_result(&result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "No device configured for target '%s'", cmd->target); send_async_reply(cmd, &result, 0, false); pcmk__reset_result(&result); free_async_command(cmd); g_list_free_full(devices, free); } else { // Device found, schedule it for fencing cmd->device_list = devices; cmd->next_device_iter = devices->next; schedule_stonith_command(cmd, device); } } /*! 
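 *
 * A note on the callback above: the matching devices are sorted in descending
 * priority order, only the highest-priority device is scheduled immediately,
 * and the rest of the sorted list is kept in cmd->next_device_iter as
 * fallbacks for st_child_done(). For example, with three matching devices at
 * priorities 100, 10, and 0, the priority-100 device is tried first, and the
 * others are attempted in order only if the previous attempt fails (and the
 * failed device was not a required one).
 *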
* \internal * \brief Execute a fence action via the local node * * \param[in] msg Fencing request * \param[out] result Where to store result of fence action */ static void fence_locally(xmlNode *msg, pcmk__action_result_t *result) { const char *device_id = NULL; stonith_device_t *device = NULL; async_command_t *cmd = NULL; xmlNode *dev = NULL; CRM_CHECK((msg != NULL) && (result != NULL), return); dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_ERR); cmd = create_async_command(msg); if (cmd == NULL) { crm_log_xml_warn(msg, "invalid"); fenced_set_protocol_error(result); return; } device_id = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (device_id != NULL) { device = g_hash_table_lookup(device_list, device_id); if (device == NULL) { crm_err("Requested device '%s' is not available", device_id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Requested device '%s' not found", device_id); return; } schedule_stonith_command(cmd, device); } else { const char *host = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); if (pcmk_is_set(cmd->options, st_opt_cs_nodeid)) { int nodeid = 0; pcmk__node_status_t *node = NULL; pcmk__scan_min_int(host, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); if (node != NULL) { host = node->name; } } /* If we get to here, then self-fencing is implicitly allowed */ get_capable_devices(host, cmd->action, cmd->default_timeout, TRUE, cmd, stonith_fence_get_devices_cb, fenced_support_flag(cmd->action)); } pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } /*! * \internal * \brief Build an XML reply for a fencing operation * * \param[in] request Request that reply is for * \param[in] data If not NULL, add to reply as call data * \param[in] result Full result of fencing operation * * \return Newly created XML reply * \note The caller is responsible for freeing the result. * \note This has some overlap with construct_async_reply(), but that copies * values from an async_command_t, whereas this one copies them from the * request. */ xmlNode * fenced_construct_reply(const xmlNode *request, xmlNode *data, const pcmk__action_result_t *result) { xmlNode *reply = NULL; reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG); stonith__xe_set_result(reply, result); if (request == NULL) { /* Most likely, this is the result of a stonith operation that was * initiated before we came up. Unfortunately that means we lack enough * information to provide clients with a full result. * * @TODO Maybe synchronize this information at start-up? */ crm_warn("Missing request information for client notifications for " "operation with result '%s' (initiated before we came up?)", pcmk_exec_status_str(result->execution_status)); } else { const char *name = NULL; const char *value = NULL; // Attributes to copy from request to reply const char *names[] = { PCMK__XA_ST_OP, PCMK__XA_ST_CALLID, PCMK__XA_ST_CLIENTID, PCMK__XA_ST_CLIENTNAME, PCMK__XA_ST_REMOTE_OP, PCMK__XA_ST_CALLOPT, }; for (int lpc = 0; lpc < PCMK__NELEM(names); lpc++) { name = names[lpc]; value = pcmk__xe_get(request, name); crm_xml_add(reply, name, value); } if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_ST_CALLDATA); pcmk__xml_copy(wrapper, data); } } return reply; } /*! 
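 *
 * For reference, the reply built above is a PCMK__XE_ST_REPLY element whose
 * attributes come from three places (names shown by constant, since the
 * literal strings are defined elsewhere): fixed values for PCMK__XA_ST_ORIGIN
 * and PCMK__XA_T, the result fields recorded by stonith__xe_set_result(), and
 * the identifiers copied from the request (PCMK__XA_ST_OP, PCMK__XA_ST_CALLID,
 * PCMK__XA_ST_CLIENTID, PCMK__XA_ST_CLIENTNAME, PCMK__XA_ST_REMOTE_OP, and
 * PCMK__XA_ST_CALLOPT), plus a PCMK__XE_ST_CALLDATA child wrapping a copy of
 * any supplied call data.
 *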
* \internal * \brief Build an XML reply to an asynchronous fencing command * * \param[in] cmd Fencing command that reply is for * \param[in] result Command result */ static xmlNode * construct_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result) { xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); crm_xml_add(reply, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(reply, PCMK__XA_ST_OP, cmd->op); crm_xml_add(reply, PCMK__XA_ST_DEVICE_ID, cmd->device); crm_xml_add(reply, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id); crm_xml_add(reply, PCMK__XA_ST_CLIENTID, cmd->client); crm_xml_add(reply, PCMK__XA_ST_CLIENTNAME, cmd->client_name); crm_xml_add(reply, PCMK__XA_ST_TARGET, cmd->target); crm_xml_add(reply, PCMK__XA_ST_DEVICE_ACTION, cmd->op); crm_xml_add(reply, PCMK__XA_ST_ORIGIN, cmd->origin); crm_xml_add_int(reply, PCMK__XA_ST_CALLID, cmd->id); crm_xml_add_int(reply, PCMK__XA_ST_CALLOPT, cmd->options); stonith__xe_set_result(reply, result); return reply; } bool fencing_peer_active(pcmk__node_status_t *peer) { return (peer != NULL) && (peer->name != NULL) && pcmk_is_set(peer->processes, crm_get_cluster_proc()); } void set_fencing_completed(remote_fencing_op_t *op) { struct timespec tv; qb_util_timespec_from_epoch_get(&tv); op->completed = tv.tv_sec; op->completed_nsec = tv.tv_nsec; } /*! * \internal * \brief Look for alternate node needed if local node shouldn't fence target * * \param[in] target Node that must be fenced * * \return Name of an alternate node that should fence \p target if any, * or NULL otherwise */ static const char * check_alternate_host(const char *target) { if (pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) { GHashTableIter gIter; pcmk__node_status_t *entry = NULL; g_hash_table_iter_init(&gIter, pcmk__peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { if (fencing_peer_active(entry) && !pcmk__str_eq(entry->name, target, pcmk__str_casei)) { crm_notice("Forwarding self-fencing request to %s", entry->name); return entry->name; } } crm_warn("Will handle own fencing because no peer can"); } return NULL; } static void remove_relay_op(xmlNode * request) { xmlNode *dev = pcmk__xpath_find_one(request->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_TRACE); const char *relay_op_id = NULL; const char *op_id = NULL; const char *client_name = NULL; const char *target = NULL; remote_fencing_op_t *relay_op = NULL; if (dev) { target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); } relay_op_id = pcmk__xe_get(request, PCMK__XA_ST_REMOTE_OP_RELAY); op_id = pcmk__xe_get(request, PCMK__XA_ST_REMOTE_OP); client_name = pcmk__xe_get(request, PCMK__XA_ST_CLIENTNAME); /* Delete RELAY operation. */ if ((relay_op_id != NULL) && (target != NULL) && pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) { relay_op = g_hash_table_lookup(stonith_remote_op_list, relay_op_id); if (relay_op) { GHashTableIter iter; remote_fencing_op_t *list_op = NULL; g_hash_table_iter_init(&iter, stonith_remote_op_list); /* If the operation to be deleted is registered as a duplicate, delete the registration. 
*/ while (g_hash_table_iter_next(&iter, NULL, (void **)&list_op)) { GList *dup_iter = NULL; if (list_op != relay_op) { for (dup_iter = list_op->duplicates; dup_iter != NULL; dup_iter = dup_iter->next) { remote_fencing_op_t *other = dup_iter->data; if (other == relay_op) { other->duplicates = g_list_remove(other->duplicates, relay_op); break; } } } } crm_debug("Deleting relay op %s ('%s'%s%s for %s), " "replaced by op %s ('%s'%s%s for %s)", relay_op->id, relay_op->action, (relay_op->target == NULL)? "" : " targeting ", pcmk__s(relay_op->target, ""), relay_op->client_name, op_id, relay_op->action, (target == NULL)? "" : " targeting ", pcmk__s(target, ""), client_name); g_hash_table_remove(stonith_remote_op_list, relay_op_id); } } } /*! * \internal * \brief Check whether an API request was sent by a privileged user * * API commands related to fencing configuration may be done only by privileged * IPC users (i.e. root or hacluster), because all other users should go through * the CIB to have ACLs applied. If no client was given, this is a peer request, * which is always allowed. * * \param[in] c IPC client that sent request (or NULL if sent by CPG peer) * \param[in] op Requested API operation (for logging only) * * \return true if sender is peer or privileged client, otherwise false */ static inline bool is_privileged(const pcmk__client_t *c, const char *op) { if ((c == NULL) || pcmk_is_set(c->flags, pcmk__client_privileged)) { return true; } else { crm_warn("Rejecting IPC request '%s' from unprivileged client %s", pcmk__s(op, ""), pcmk__client_name(c)); return false; } } // CRM_OP_REGISTER static xmlNode * handle_register_request(pcmk__request_t *request) { xmlNode *reply = pcmk__xe_create(NULL, "reply"); pcmk__assert(request->ipc_client != NULL); crm_xml_add(reply, PCMK__XA_ST_OP, CRM_OP_REGISTER); crm_xml_add(reply, PCMK__XA_ST_CLIENTID, request->ipc_client->id); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_request_flags(request, pcmk__request_reuse_options); return reply; } // STONITH_OP_EXEC static xmlNode * handle_agent_request(pcmk__request_t *request) { execute_agent_action(request->xml, &request->result); if (request->result.execution_status == PCMK_EXEC_PENDING) { return NULL; } return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_TIMEOUT_UPDATE static xmlNode * handle_update_timeout_request(pcmk__request_t *request) { const char *call_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CALLID); const char *client_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CLIENTID); int op_timeout = 0; pcmk__xe_get_int(request->xml, PCMK__XA_ST_TIMEOUT, &op_timeout); do_stonith_async_timeout_update(client_id, call_id, op_timeout); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } // STONITH_OP_QUERY static xmlNode * handle_query_request(pcmk__request_t *request) { int timeout = 0; xmlNode *dev = NULL; const char *action = NULL; const char *target = NULL; const char *client_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CLIENTID); struct st_query_data *query = NULL; if (request->peer != NULL) { // Record it for the future notification create_remote_stonith_op(client_id, request->xml, TRUE); } /* Delete the DC node RELAY operation. 
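 * Background: a relay operation is the local duplicate that
 * handle_fence_request() registers when a self-fencing request (typically
 * from the DC) is forwarded to a peer via STONITH_OP_RELAY. As the comment
 * there notes, that duplicate should be deleted once a query for the
 * forwarded request comes back, which is what remove_relay_op() does below.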
*/ remove_relay_op(request->xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); dev = pcmk__xpath_find_one(request->xml->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_NEVER); if (dev != NULL) { const char *device = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (pcmk__str_eq(device, "manual_ack", pcmk__str_casei)) { return NULL; // No query or reply necessary } target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); action = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ACTION); } crm_log_xml_trace(request->xml, "Query"); query = pcmk__assert_alloc(1, sizeof(struct st_query_data)); query->reply = fenced_construct_reply(request->xml, NULL, &request->result); query->remote_peer = pcmk__str_copy(request->peer); query->client_id = pcmk__str_copy(client_id); query->target = pcmk__str_copy(target); query->action = pcmk__str_copy(action); query->call_options = request->call_options; pcmk__xe_get_int(request->xml, PCMK__XA_ST_TIMEOUT, &timeout); get_capable_devices(target, action, timeout, pcmk_is_set(query->call_options, st_opt_allow_self_fencing), query, stonith_query_capable_device_cb, st_device_supports_none); return NULL; } // STONITH_OP_NOTIFY static xmlNode * handle_notify_request(pcmk__request_t *request) { const char *flag_name = NULL; pcmk__assert(request->ipc_client != NULL); flag_name = pcmk__xe_get(request->xml, PCMK__XA_ST_NOTIFY_ACTIVATE); if (flag_name != NULL) { crm_debug("Enabling %s callbacks for client %s", flag_name, pcmk__request_origin(request)); pcmk__set_client_flags(request->ipc_client, get_stonith_flag(flag_name)); } flag_name = pcmk__xe_get(request->xml, PCMK__XA_ST_NOTIFY_DEACTIVATE); if (flag_name != NULL) { crm_debug("Disabling %s callbacks for client %s", flag_name, pcmk__request_origin(request)); pcmk__clear_client_flags(request->ipc_client, get_stonith_flag(flag_name)); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_request_flags(request, pcmk__request_reuse_options); return pcmk__ipc_create_ack(request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_OK); } // STONITH_OP_RELAY static xmlNode * handle_relay_request(pcmk__request_t *request) { xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_TRACE); crm_notice("Received forwarded fencing request from " "%s %s to fence (%s) peer %s", pcmk__request_origin_type(request), pcmk__request_origin(request), pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ACTION), pcmk__xe_get(dev, PCMK__XA_ST_TARGET)); if (initiate_remote_stonith_op(NULL, request->xml, FALSE) == NULL) { fenced_set_protocol_error(&request->result); return fenced_construct_reply(request->xml, NULL, &request->result); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); return NULL; } // STONITH_OP_FENCE static xmlNode * handle_fence_request(pcmk__request_t *request) { if (request->peer != NULL) { fence_locally(request->xml, &request->result); } else if (pcmk_is_set(request->call_options, st_opt_manual_ack)) { switch (fenced_handle_manual_confirmation(request->ipc_client, request->xml)) { case pcmk_rc_ok: pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); break; case EINPROGRESS: pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); break; default: fenced_set_protocol_error(&request->result); break; } } else { const char *alternate_host = NULL; xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_TRACE); const char *target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); const char *action = pcmk__xe_get(dev, 
PCMK__XA_ST_DEVICE_ACTION); const char *device = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (request->ipc_client != NULL) { int tolerance = 0; crm_notice("Client %s wants to fence (%s) %s using %s", pcmk__request_origin(request), action, target, (device? device : "any device")); pcmk__xe_get_int(dev, PCMK__XA_ST_TOLERANCE, &tolerance); if (stonith_check_fence_tolerance(tolerance, target, action)) { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return fenced_construct_reply(request->xml, NULL, &request->result); } alternate_host = check_alternate_host(target); } else { crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'", request->peer, action, target, (device == NULL)? "(any)" : device); } if (alternate_host != NULL) { const char *client_id = NULL; remote_fencing_op_t *op = NULL; pcmk__node_status_t *node = pcmk__get_node(0, alternate_host, NULL, pcmk__node_search_cluster_member); if (request->ipc_client->id == 0) { client_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CLIENTID); } else { client_id = request->ipc_client->id; } /* Create a duplicate fencing operation to relay with the client ID. * When a query response is received, this operation should be * deleted to avoid keeping the duplicate around. */ op = create_remote_stonith_op(client_id, request->xml, FALSE); crm_xml_add(request->xml, PCMK__XA_ST_OP, STONITH_OP_RELAY); crm_xml_add(request->xml, PCMK__XA_ST_CLIENTID, request->ipc_client->id); crm_xml_add(request->xml, PCMK__XA_ST_REMOTE_OP, op->id); // @TODO On failure, fail request immediately, or maybe panic pcmk__cluster_send_message(node, pcmk_ipc_fenced, request->xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } else if (initiate_remote_stonith_op(request->ipc_client, request->xml, FALSE) == NULL) { fenced_set_protocol_error(&request->result); } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } } if (request->result.execution_status == PCMK_EXEC_PENDING) { return NULL; } return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_FENCE_HISTORY static xmlNode * handle_history_request(pcmk__request_t *request) { xmlNode *reply = NULL; xmlNode *data = NULL; stonith_fence_history(request->xml, &data, request->peer, request->call_options); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (!pcmk_is_set(request->call_options, st_opt_discard_reply)) { /* When the local node broadcasts its history, it sets * st_opt_discard_reply and doesn't need a reply. */ reply = fenced_construct_reply(request->xml, data, &request->result); } pcmk__xml_free(data); return reply; } // STONITH_OP_DEVICE_ADD static xmlNode * handle_device_add_request(pcmk__request_t *request) { const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//" PCMK__XE_ST_DEVICE_ID, LOG_ERR); if (is_privileged(request->ipc_client, op)) { int rc = stonith_device_register(dev, FALSE); pcmk__set_result(&request->result, ((rc == pcmk_ok)? CRM_EX_OK : CRM_EX_ERROR), stonith__legacy2status(rc), ((rc == pcmk_ok)? NULL : pcmk_strerror(rc))); } else { pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must register device via CIB"); } fenced_send_config_notification(op, &request->result, (dev == NULL)? 
NULL : pcmk__xe_id(dev)); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_DEVICE_DEL static xmlNode * handle_device_delete_request(pcmk__request_t *request) { xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//" PCMK__XE_ST_DEVICE_ID, LOG_ERR); const char *device_id = pcmk__xe_get(dev, PCMK_XA_ID); const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { stonith_device_remove(device_id, false); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must delete device via CIB"); } fenced_send_config_notification(op, &request->result, device_id); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_LEVEL_ADD static xmlNode * handle_level_add_request(pcmk__request_t *request) { char *desc = NULL; const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { fenced_register_level(request->xml, &desc, &request->result); } else { unpack_level_request(request->xml, NULL, NULL, NULL, &desc); pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must add level via CIB"); } fenced_send_config_notification(op, &request->result, desc); free(desc); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_LEVEL_DEL static xmlNode * handle_level_delete_request(pcmk__request_t *request) { char *desc = NULL; const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { fenced_unregister_level(request->xml, &desc, &request->result); } else { unpack_level_request(request->xml, NULL, NULL, NULL, &desc); pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must delete level via CIB"); } fenced_send_config_notification(op, &request->result, desc); free(desc); return fenced_construct_reply(request->xml, NULL, &request->result); } // CRM_OP_RM_NODE_CACHE static xmlNode * handle_cache_request(pcmk__request_t *request) { int node_id = 0; const char *name = NULL; pcmk__xe_get_int(request->xml, PCMK_XA_ID, &node_id); name = pcmk__xe_get(request->xml, PCMK_XA_UNAME); pcmk__cluster_forget_cluster_node(node_id, name); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } static xmlNode * handle_unknown_request(pcmk__request_t *request) { crm_err("Unknown IPC request %s from %s %s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request)); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", request->op); return fenced_construct_reply(request->xml, NULL, &request->result); } static void fenced_register_handlers(void) { pcmk__server_command_t handlers[] = { { CRM_OP_REGISTER, handle_register_request }, { STONITH_OP_EXEC, handle_agent_request }, { STONITH_OP_TIMEOUT_UPDATE, handle_update_timeout_request }, { STONITH_OP_QUERY, handle_query_request }, { STONITH_OP_NOTIFY, handle_notify_request }, { STONITH_OP_RELAY, handle_relay_request }, { STONITH_OP_FENCE, handle_fence_request }, { STONITH_OP_FENCE_HISTORY, handle_history_request }, { STONITH_OP_DEVICE_ADD, handle_device_add_request }, { STONITH_OP_DEVICE_DEL, handle_device_delete_request }, { STONITH_OP_LEVEL_ADD, handle_level_add_request }, { STONITH_OP_LEVEL_DEL, handle_level_delete_request }, { 
CRM_OP_RM_NODE_CACHE, handle_cache_request }, { NULL, handle_unknown_request }, }; fenced_handlers = pcmk__register_handlers(handlers); } void fenced_unregister_handlers(void) { if (fenced_handlers != NULL) { g_hash_table_destroy(fenced_handlers); fenced_handlers = NULL; } } static void handle_request(pcmk__request_t *request) { xmlNode *reply = NULL; const char *reason = NULL; if (fenced_handlers == NULL) { fenced_register_handlers(); } reply = pcmk__process_request(request, fenced_handlers); if (reply != NULL) { if (pcmk_is_set(request->flags, pcmk__request_reuse_options) && (request->ipc_client != NULL)) { /* Certain IPC-only commands must reuse the call options from the * original request rather than the ones set by stonith_send_reply() * -> do_local_reply(). */ pcmk__ipc_send_xml(request->ipc_client, request->ipc_id, reply, request->ipc_flags); request->ipc_client->request_id = 0; } else { stonith_send_reply(reply, request->call_options, request->peer, request->ipc_client); } pcmk__xml_free(reply); } reason = request->result.exit_reason; crm_debug("Processed %s request from %s %s: %s%s%s%s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request), pcmk_exec_status_str(request->result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); } static void handle_reply(pcmk__client_t *client, xmlNode *request, const char *remote_peer) { // Copy, because request might be freed before we want to log this - char *op = crm_element_value_copy(request, PCMK__XA_ST_OP); + char *op = pcmk__xe_get_copy(request, PCMK__XA_ST_OP); if (pcmk__str_eq(op, STONITH_OP_QUERY, pcmk__str_none)) { process_remote_stonith_query(request); } else if (pcmk__str_any_of(op, STONITH_OP_NOTIFY, STONITH_OP_FENCE, NULL)) { fenced_process_fencing_reply(request); } else { crm_err("Ignoring unknown %s reply from %s %s", pcmk__s(op, "untyped"), ((client == NULL)? "peer" : "client"), ((client == NULL)? remote_peer : pcmk__client_name(client))); crm_log_xml_warn(request, "UnknownOp"); free(op); return; } crm_debug("Processed %s reply from %s %s", op, ((client == NULL)? "peer" : "client"), ((client == NULL)? remote_peer : pcmk__client_name(client))); free(op); } /*! * \internal * \brief Handle a message from an IPC client or CPG peer * * \param[in,out] client If not NULL, IPC client that sent message * \param[in] id If from IPC client, IPC message ID * \param[in] flags Message flags * \param[in,out] message Message XML * \param[in] remote_peer If not NULL, CPG peer that sent message */ void stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags, xmlNode *message, const char *remote_peer) { uint32_t call_options = st_opt_none; int rc = pcmk_rc_ok; bool is_reply = false; CRM_CHECK(message != NULL, return); if (pcmk__xpath_find_one(message->doc, "//" PCMK__XE_ST_REPLY, LOG_NEVER) != NULL) { is_reply = true; } rc = pcmk__xe_get_flags(message, PCMK__XA_ST_CALLOPT, &call_options, st_opt_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from message: %s", pcmk_rc_str(rc)); } crm_debug("Processing %ssynchronous %s %s %u from %s %s", pcmk_is_set(call_options, st_opt_sync_call)? "" : "a", pcmk__xe_get(message, PCMK__XA_ST_OP), (is_reply? "reply" : "request"), id, ((client == NULL)? "peer" : "client"), ((client == NULL)? 
remote_peer : pcmk__client_name(client))); if (pcmk_is_set(call_options, st_opt_sync_call)) { pcmk__assert((client == NULL) || (client->request_id == id)); } if (is_reply) { handle_reply(client, message, remote_peer); } else { pcmk__request_t request = { .ipc_client = client, .ipc_id = id, .ipc_flags = flags, .peer = remote_peer, .xml = message, .call_options = call_options, .result = PCMK__UNKNOWN_RESULT, }; - request.op = crm_element_value_copy(request.xml, PCMK__XA_ST_OP); + request.op = pcmk__xe_get_copy(request.xml, PCMK__XA_ST_OP); CRM_CHECK(request.op != NULL, return); if (pcmk_is_set(request.call_options, st_opt_sync_call)) { pcmk__set_request_flags(&request, pcmk__request_sync); } handle_request(&request); pcmk__reset_request(&request); } } diff --git a/daemons/fenced/fenced_history.c b/daemons/fenced/fenced_history.c index 70791c9d57..9653e193c5 100644 --- a/daemons/fenced/fenced_history.c +++ b/daemons/fenced/fenced_history.c @@ -1,576 +1,574 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include // xmlNode #include #include #include #include #include #include #include #include #include #define MAX_STONITH_HISTORY 500 /*! * \internal * \brief Send a broadcast to all nodes to trigger cleanup or * history synchronisation * * \param[in] history Optional history to be attached * \param[in] callopts We control cleanup via a flag in the callopts * \param[in] target Cleanup can be limited to certain fence-targets */ static void stonith_send_broadcast_history(xmlNode *history, int callopts, const char *target) { xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); xmlNode *wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA); xmlNode *call_data = pcmk__xe_create(wrapper, __func__); crm_xml_add(bcast, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); crm_xml_add(bcast, PCMK__XA_ST_OP, STONITH_OP_FENCE_HISTORY); crm_xml_add_int(bcast, PCMK__XA_ST_CALLOPT, callopts); pcmk__xml_copy(call_data, history); if (target != NULL) { crm_xml_add(call_data, PCMK__XA_ST_TARGET, target); } pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, bcast); pcmk__xml_free(bcast); } static gboolean stonith_remove_history_entry (gpointer key, gpointer value, gpointer user_data) { remote_fencing_op_t *op = value; const char *target = (const char *) user_data; if ((op->state == st_failed) || (op->state == st_done)) { if ((target) && (strcmp(op->target, target) != 0)) { return FALSE; } return TRUE; } return FALSE; /* don't clean pending operations */ } /*! 
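 *
 * For orientation, the broadcast assembled above is a PCMK__XE_STONITH_COMMAND
 * message with PCMK__XA_SUBT set to PCMK__VALUE_BROADCAST, PCMK__XA_ST_OP set
 * to STONITH_OP_FENCE_HISTORY, and the caller's options carried in
 * PCMK__XA_ST_CALLOPT; any history payload and optional target travel inside
 * a PCMK__XE_ST_CALLDATA wrapper. Whether a receiving peer treats it as a
 * cleanup or as a history-sync request depends on those option flags
 * (st_opt_cleanup versus st_opt_broadcast), as handled in
 * stonith_fence_history() further below.
 *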
* \internal * \brief Send out a cleanup broadcast or do a local history-cleanup * * \param[in] target Cleanup can be limited to certain fence-targets * \param[in] broadcast Send out a cleanup broadcast */ static void stonith_fence_history_cleanup(const char *target, gboolean broadcast) { if (broadcast) { stonith_send_broadcast_history(NULL, st_opt_cleanup | st_opt_discard_reply, target); /* we'll do the local clean when we receive back our own broadcast */ } else if (stonith_remote_op_list) { g_hash_table_foreach_remove(stonith_remote_op_list, stonith_remove_history_entry, (gpointer) target); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } } /* keeping the length of fence-history within bounds * ================================================= * * If things are really running wild a lot of fencing-attempts * might fill up the hash-map, eventually using up a lot * of memory and creating huge history-sync messages. * Before the history being synced across nodes at least * the reboot of a cluster-node helped keeping the * history within bounds even though not in a reliable * manner. * * stonith_remote_op_list isn't sorted for time-stamps * thus it would be kind of expensive to delete e.g. * the oldest entry if it would grow past MAX_STONITH_HISTORY * entries. * It is more efficient to purge MAX_STONITH_HISTORY/2 * entries whenever the list grows beyond MAX_STONITH_HISTORY. * (sort for age + purge the MAX_STONITH_HISTORY/2 oldest) * That done on a per-node-base might raise the * probability of large syncs to occur. * Things like introducing a broadcast to purge * MAX_STONITH_HISTORY/2 entries or not sync above a certain * threshold coming to mind ... * Simplest thing though is to purge the full history * throughout the cluster once MAX_STONITH_HISTORY is reached. * On the other hand this leads to purging the history in * situations where it would be handy to have it probably. */ /*! * \internal * \brief Compare two remote fencing operations by status and completion time * * A pending operation is ordered before a completed operation. If both * operations have completed, then the more recently completed operation is * ordered first. Two pending operations are considered equal. * * \param[in] a First \c remote_fencing_op_t to compare * \param[in] b Second \c remote_fencing_op_t to compare * * \return Standard comparison result (a negative integer if \p a is lesser, * 0 if the values are equal, and a positive integer if \p a is greater) */ static gint cmp_op_by_completion(gconstpointer a, gconstpointer b) { const remote_fencing_op_t *op1 = a; const remote_fencing_op_t *op2 = b; bool op1_pending = stonith__op_state_pending(op1->state); bool op2_pending = stonith__op_state_pending(op2->state); if (op1_pending && op2_pending) { return 0; } if (op1_pending) { return -1; } if (op2_pending) { return 1; } if (op1->completed > op2->completed) { return -1; } if (op1->completed < op2->completed) { return 1; } if (op1->completed_nsec > op2->completed_nsec) { return -1; } if (op1->completed_nsec < op2->completed_nsec) { return 1; } return 0; } /*! * \internal * \brief Remove a completed operation from \c stonith_remote_op_list * * \param[in] data \c remote_fencing_op_t to remove * \param[in] user_data Ignored */ static void remove_completed_remote_op(gpointer data, gpointer user_data) { const remote_fencing_op_t *op = data; if (!stonith__op_state_pending(op->state)) { g_hash_table_remove(stonith_remote_op_list, op->id); } } /*! 
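 *
 * Worked example of the trim below: with MAX_STONITH_HISTORY at 500, nothing
 * happens until the table holds 501 entries. The entries are then sorted with
 * cmp_op_by_completion() (pending operations first, then completed ones from
 * newest to oldest), and every completed entry from list position 250 onward
 * is dropped. So if 20 operations are still pending, what remains is those 20
 * plus the 230 most recently completed entries; pending operations are never
 * purged, however old they are.
 *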
* \internal * \brief Do a local history-trim to MAX_STONITH_HISTORY / 2 entries * once over MAX_STONITH_HISTORY */ void stonith_fence_history_trim(void) { if (stonith_remote_op_list == NULL) { return; } if (g_hash_table_size(stonith_remote_op_list) > MAX_STONITH_HISTORY) { GList *ops = g_hash_table_get_values(stonith_remote_op_list); crm_trace("More than %d entries in fencing history, purging oldest " "completed operations", MAX_STONITH_HISTORY); ops = g_list_sort(ops, cmp_op_by_completion); // Always keep pending ops regardless of number of entries g_list_foreach(g_list_nth(ops, MAX_STONITH_HISTORY / 2), remove_completed_remote_op, NULL); // No need for a notification after purging old data g_list_free(ops); } } /*! * \internal * \brief Convert xml fence-history to a hash-table like stonith_remote_op_list * * \param[in] history Fence-history in xml * * \return Fence-history as hash-table */ static GHashTable * stonith_xml_history_to_list(const xmlNode *history) { xmlNode *xml_op = NULL; GHashTable *rv = NULL; init_stonith_remote_op_hash_table(&rv); CRM_LOG_ASSERT(rv != NULL); for (xml_op = pcmk__xe_first_child(history, NULL, NULL, NULL); xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) { remote_fencing_op_t *op = NULL; - char *id = crm_element_value_copy(xml_op, PCMK__XA_ST_REMOTE_OP); + char *id = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_REMOTE_OP); int state; int exit_status = CRM_EX_OK; int execution_status = PCMK_EXEC_DONE; if (!id) { crm_warn("Malformed fencing history received from peer"); continue; } crm_trace("Attaching op %s to hashtable", id); op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t)); op->id = id; - op->target = crm_element_value_copy(xml_op, PCMK__XA_ST_TARGET); - op->action = crm_element_value_copy(xml_op, PCMK__XA_ST_DEVICE_ACTION); - op->originator = crm_element_value_copy(xml_op, PCMK__XA_ST_ORIGIN); - op->delegate = crm_element_value_copy(xml_op, PCMK__XA_ST_DELEGATE); - op->client_name = crm_element_value_copy(xml_op, - PCMK__XA_ST_CLIENTNAME); + op->target = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_TARGET); + op->action = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_DEVICE_ACTION); + op->originator = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_ORIGIN); + op->delegate = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_DELEGATE); + op->client_name = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_CLIENTNAME); pcmk__xe_get_time(xml_op, PCMK__XA_ST_DATE, &op->completed); pcmk__xe_get_ll(xml_op, PCMK__XA_ST_DATE_NSEC, &op->completed_nsec); pcmk__xe_get_int(xml_op, PCMK__XA_ST_STATE, &state); op->state = (enum op_state) state; /* @COMPAT We can't use stonith__xe_get_result() here because * fencers <2.1.3 didn't include results, leading it to assume an error * status. Instead, set an unknown status in that case. */ if ((pcmk__xe_get_int(xml_op, PCMK__XA_RC_CODE, &exit_status) != pcmk_rc_ok) || (pcmk__xe_get_int(xml_op, PCMK__XA_OP_STATUS, &execution_status) != pcmk_rc_ok)) { exit_status = CRM_EX_INDETERMINATE; execution_status = PCMK_EXEC_UNKNOWN; } pcmk__set_result(&op->result, exit_status, execution_status, pcmk__xe_get(xml_op, PCMK_XA_EXIT_REASON)); pcmk__set_result_output(&op->result, - crm_element_value_copy(xml_op, - PCMK__XA_ST_OUTPUT), + pcmk__xe_get_copy(xml_op, PCMK__XA_ST_OUTPUT), NULL); g_hash_table_replace(rv, id, op); CRM_LOG_ASSERT(g_hash_table_lookup(rv, id) != NULL); } return rv; } /*! 
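 *
 * Informally, the merge below resolves each operation ID as follows:
 *
 *   - pending locally but completed in the peer history: adopt the peer's
 *     entry and leave it out of the outgoing diff
 *   - completed locally but still pending in the peer history: keep the local
 *     entry and include it in the outgoing diff so the peer can catch up
 *   - otherwise, when both sides agree: keep the local entry, nothing to send
 *   - known only from the peer history: merge it in, except that a pending
 *     entry naming this node as originator is marked failed, on the
 *     assumption that it was started by an earlier fencer process
 *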
* \internal * \brief Craft xml difference between local fence-history and a history * coming from remote, and merge the remote history into the local * * \param[in,out] remote_history Fence-history as hash-table (may be NULL) * \param[in] add_id If crafting the answer for an API * history-request there is no need for the id * \param[in] target Optionally limit to certain fence-target * * \return The fence-history as xml */ static xmlNode * stonith_local_history_diff_and_merge(GHashTable *remote_history, gboolean add_id, const char *target) { xmlNode *history = NULL; GHashTableIter iter; remote_fencing_op_t *op = NULL; gboolean updated = FALSE; int cnt = 0; if (stonith_remote_op_list) { char *id = NULL; history = pcmk__xe_create(NULL, PCMK__XE_ST_HISTORY); g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, (void **)&id, (void **)&op)) { xmlNode *entry = NULL; if (remote_history) { remote_fencing_op_t *remote_op = g_hash_table_lookup(remote_history, op->id); if (remote_op) { if (stonith__op_state_pending(op->state) && !stonith__op_state_pending(remote_op->state)) { crm_debug("Updating outdated pending operation %.8s " "(state=%s) according to the one (state=%s) from " "remote peer history", op->id, stonith_op_state_str(op->state), stonith_op_state_str(remote_op->state)); g_hash_table_steal(remote_history, op->id); op->id = remote_op->id; remote_op->id = id; g_hash_table_iter_replace(&iter, remote_op); updated = TRUE; continue; /* skip outdated entries */ } else if (!stonith__op_state_pending(op->state) && stonith__op_state_pending(remote_op->state)) { crm_debug("Broadcasting operation %.8s (state=%s) to " "update the outdated pending one " "(state=%s) in remote peer history", op->id, stonith_op_state_str(op->state), stonith_op_state_str(remote_op->state)); g_hash_table_remove(remote_history, op->id); } else { g_hash_table_remove(remote_history, op->id); continue; /* skip entries broadcasted already */ } } } if (!pcmk__str_eq(target, op->target, pcmk__str_null_matches)) { continue; } cnt++; crm_trace("Attaching op %s", op->id); entry = pcmk__xe_create(history, STONITH_OP_EXEC); if (add_id) { crm_xml_add(entry, PCMK__XA_ST_REMOTE_OP, op->id); } crm_xml_add(entry, PCMK__XA_ST_TARGET, op->target); crm_xml_add(entry, PCMK__XA_ST_DEVICE_ACTION, op->action); crm_xml_add(entry, PCMK__XA_ST_ORIGIN, op->originator); crm_xml_add(entry, PCMK__XA_ST_DELEGATE, op->delegate); crm_xml_add(entry, PCMK__XA_ST_CLIENTNAME, op->client_name); crm_xml_add_ll(entry, PCMK__XA_ST_DATE, op->completed); crm_xml_add_ll(entry, PCMK__XA_ST_DATE_NSEC, op->completed_nsec); crm_xml_add_int(entry, PCMK__XA_ST_STATE, op->state); stonith__xe_set_result(entry, &op->result); } } if (remote_history) { init_stonith_remote_op_hash_table(&stonith_remote_op_list); updated |= g_hash_table_size(remote_history); g_hash_table_iter_init(&iter, remote_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) { if (stonith__op_state_pending(op->state) && pcmk__str_eq(op->originator, fenced_get_local_node(), pcmk__str_casei)) { crm_warn("Failing pending operation %.8s originated by us but " "known only from peer history", op->id); op->state = st_failed; set_fencing_completed(op); /* CRM_EX_EXPIRED + PCMK_EXEC_INVALID prevents finalize_op() * from setting a delegate */ pcmk__set_result(&op->result, CRM_EX_EXPIRED, PCMK_EXEC_INVALID, "Initiated by earlier fencer " "process and presumed failed"); fenced_broadcast_op_result(op, false); } g_hash_table_iter_steal(&iter); 
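            /* Ownership handoff: the steal above detaches the entry from
             * remote_history without running its destructor, and the replace
             * below hands the same op over to stonith_remote_op_list, whose
             * free_remote_op() destructor will eventually release it.
             */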
g_hash_table_replace(stonith_remote_op_list, op->id, op); /* we could trim the history here but if we bail * out after trim we might miss more recent entries * of those that might still be in the list * if we don't bail out trimming once is more * efficient and memory overhead is minimal as * we are just moving pointers from one hash to * another */ } g_hash_table_destroy(remote_history); /* remove what is left */ } if (updated) { stonith_fence_history_trim(); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } if (cnt == 0) { pcmk__xml_free(history); return NULL; } else { return history; } } /*! * \internal * \brief Craft xml from the local fence-history * * \param[in] add_id If crafting the answer for an API * history-request there is no need for the id * \param[in] target Optionally limit to certain fence-target * * \return The fence-history as xml */ static xmlNode * stonith_local_history(gboolean add_id, const char *target) { return stonith_local_history_diff_and_merge(NULL, add_id, target); } /*! * \internal * \brief Handle fence-history messages (from API or coming in as broadcasts) * * \param[in,out] msg Request XML * \param[out] output Where to set local history, if requested * \param[in] remote_peer If broadcast, peer that sent it * \param[in] options Call options from the request */ void stonith_fence_history(xmlNode *msg, xmlNode **output, const char *remote_peer, int options) { const char *target = NULL; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_NEVER); xmlNode *out_history = NULL; if (dev) { target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); if (target && (options & st_opt_cs_nodeid)) { int nodeid; pcmk__node_status_t *node = NULL; pcmk__scan_min_int(target, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); if (node != NULL) { target = node->name; } } } if (options & st_opt_cleanup) { const char *call_id = pcmk__xe_get(msg, PCMK__XA_ST_CALLID); crm_trace("Cleaning up operations on %s in %p", target, stonith_remote_op_list); stonith_fence_history_cleanup(target, (call_id != NULL)); } else if (options & st_opt_broadcast) { /* there is no clear sign atm for when a history sync is done so send a notification for anything that smells like history-sync */ fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY_SYNCED, NULL, NULL); if (pcmk__xe_get(msg, PCMK__XA_ST_CALLID) != NULL) { /* this is coming from the stonith-API * * craft a broadcast with node's history * so that every node can merge and broadcast * what it has on top */ out_history = stonith_local_history(TRUE, NULL); crm_trace("Broadcasting history to peers"); stonith_send_broadcast_history(out_history, st_opt_broadcast | st_opt_discard_reply, NULL); } else if (remote_peer && !pcmk__str_eq(remote_peer, fenced_get_local_node(), pcmk__str_casei)) { xmlNode *history = pcmk__xpath_find_one(msg->doc, "//" PCMK__XE_ST_HISTORY, LOG_NEVER); /* either a broadcast created directly upon stonith-API request * or a diff as response to such a thing * * in both cases it may have a history or not * if we have differential data * merge in what we've received and stop * otherwise broadcast what we have on top * marking as differential and merge in afterwards */ if (!history || !pcmk__xe_attr_is_true(history, PCMK__XA_ST_DIFFERENTIAL)) { GHashTable *received_history = NULL; if (history != NULL) { received_history = stonith_xml_history_to_list(history); } out_history = 
stonith_local_history_diff_and_merge(received_history, TRUE, NULL); if (out_history) { crm_trace("Broadcasting history-diff to peers"); pcmk__xe_set_bool_attr(out_history, PCMK__XA_ST_DIFFERENTIAL, true); stonith_send_broadcast_history(out_history, st_opt_broadcast | st_opt_discard_reply, NULL); } else { crm_trace("History-diff is empty - skip broadcast"); } } } else { crm_trace("Skipping history-query-broadcast (%s%s)" " we sent ourselves", remote_peer?"remote-peer=":"local-ipc", remote_peer?remote_peer:""); } } else { /* plain history request */ crm_trace("Looking for operations on %s in %p", target, stonith_remote_op_list); *output = stonith_local_history(FALSE, target); } pcmk__xml_free(out_history); } diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c index eaab144afa..336e5e7968 100644 --- a/daemons/fenced/fenced_remote.c +++ b/daemons/fenced/fenced_remote.c @@ -1,2622 +1,2621 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include // xmlNode #include #include #include #include #include #include #include #include #include #include #define TIMEOUT_MULTIPLY_FACTOR 1.2 /* When one fencer queries its peers for devices able to handle a fencing * request, each peer will reply with a list of such devices available to it. * Each reply will be parsed into a peer_device_info_t, with each device's * information kept in a device_properties_t. */ typedef struct device_properties_s { /* Whether access to this device has been verified */ gboolean verified; /* The remaining members are indexed by the operation's "phase" */ /* Whether this device has been executed in each phase */ gboolean executed[st_phase_max]; /* Whether this device is disallowed from executing in each phase */ gboolean disallowed[st_phase_max]; /* Action-specific timeout for each phase */ int custom_action_timeout[st_phase_max]; /* Action-specific maximum random delay for each phase */ int delay_max[st_phase_max]; /* Action-specific base delay for each phase */ int delay_base[st_phase_max]; /* Group of enum st_device_flags */ uint32_t device_support_flags; } device_properties_t; typedef struct { /* Name of peer that sent this result */ char *host; /* Only try peers for non-topology based operations once */ gboolean tried; /* Number of entries in the devices table */ int ndevices; /* Devices available to this host that are capable of fencing the target */ GHashTable *devices; } peer_device_info_t; GHashTable *stonith_remote_op_list = NULL; extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); static void request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer); static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup); static void report_timeout_period(remote_fencing_op_t * op, int op_timeout); static int get_op_total_timeout(const remote_fencing_op_t *op, const peer_device_info_t *chosen_peer); static gint sort_strings(gconstpointer a, gconstpointer b) { return strcmp(a, b); } static void free_remote_query(gpointer data) { if (data != NULL) { peer_device_info_t *peer = data; g_hash_table_destroy(peer->devices); free(peer->host); free(peer); } } void free_stonith_remote_op_list(void) { if 
(stonith_remote_op_list != NULL) { g_hash_table_destroy(stonith_remote_op_list); stonith_remote_op_list = NULL; } } struct peer_count_data { const remote_fencing_op_t *op; gboolean verified_only; uint32_t support_action_only; int count; }; /*! * \internal * \brief Increment a counter if a device has not been executed yet * * \param[in] key Device ID (ignored) * \param[in] value Device properties * \param[in,out] user_data Peer count data */ static void count_peer_device(gpointer key, gpointer value, gpointer user_data) { device_properties_t *props = (device_properties_t*)value; struct peer_count_data *data = user_data; if (!props->executed[data->op->phase] && (!data->verified_only || props->verified) && ((data->support_action_only == st_device_supports_none) || pcmk_is_set(props->device_support_flags, data->support_action_only))) { ++(data->count); } } /*! * \internal * \brief Check the number of available devices in a peer's query results * * \param[in] op Operation that results are for * \param[in] peer Peer to count * \param[in] verified_only Whether to count only verified devices * \param[in] support_action_only Whether to count only devices that support action * * \return Number of devices available to peer that were not already executed */ static int count_peer_devices(const remote_fencing_op_t *op, const peer_device_info_t *peer, gboolean verified_only, uint32_t support_on_action_only) { struct peer_count_data data; data.op = op; data.verified_only = verified_only; data.support_action_only = support_on_action_only; data.count = 0; if (peer) { g_hash_table_foreach(peer->devices, count_peer_device, &data); } return data.count; } /*! * \internal * \brief Search for a device in a query result * * \param[in] op Operation that result is for * \param[in] peer Query result for a peer * \param[in] device Device ID to search for * * \return Device properties if found, NULL otherwise */ static device_properties_t * find_peer_device(const remote_fencing_op_t *op, const peer_device_info_t *peer, const char *device, uint32_t support_action_only) { device_properties_t *props = g_hash_table_lookup(peer->devices, device); if (props && support_action_only != st_device_supports_none && !pcmk_is_set(props->device_support_flags, support_action_only)) { return NULL; } return (props && !props->executed[op->phase] && !props->disallowed[op->phase])? props : NULL; } /*! 
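 *
 * To summarize the lookup used below: find_peer_device() returns a device
 * only if it advertises support for the requested action (when a support
 * flag is given), has not already been executed in the current phase, and is
 * not marked as disallowed for that phase.
 *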
* \internal * \brief Find a device in a peer's device list and mark it as executed * * \param[in] op Operation that peer result is for * \param[in,out] peer Peer with results to search * \param[in] device ID of device to mark as done * \param[in] verified_devices_only Only consider verified devices * * \return TRUE if device was found and marked, FALSE otherwise */ static gboolean grab_peer_device(const remote_fencing_op_t *op, peer_device_info_t *peer, const char *device, gboolean verified_devices_only) { device_properties_t *props = find_peer_device(op, peer, device, fenced_support_flag(op->action)); if ((props == NULL) || (verified_devices_only && !props->verified)) { return FALSE; } crm_trace("Removing %s from %s (%d remaining)", device, peer->host, count_peer_devices(op, peer, FALSE, st_device_supports_none)); props->executed[op->phase] = TRUE; return TRUE; } static void clear_remote_op_timers(remote_fencing_op_t * op) { if (op->query_timer) { g_source_remove(op->query_timer); op->query_timer = 0; } if (op->op_timer_total) { g_source_remove(op->op_timer_total); op->op_timer_total = 0; } if (op->op_timer_one) { g_source_remove(op->op_timer_one); op->op_timer_one = 0; } } static void free_remote_op(gpointer data) { remote_fencing_op_t *op = data; crm_log_xml_debug(op->request, "Destroying"); clear_remote_op_timers(op); free(op->id); free(op->action); free(op->delegate); free(op->target); free(op->client_id); free(op->client_name); free(op->originator); if (op->query_results) { g_list_free_full(op->query_results, free_remote_query); } if (op->request) { pcmk__xml_free(op->request); op->request = NULL; } if (op->devices_list) { g_list_free_full(op->devices_list, free); op->devices_list = NULL; } g_list_free_full(op->automatic_list, free); g_list_free(op->duplicates); pcmk__reset_result(&op->result); free(op); } void init_stonith_remote_op_hash_table(GHashTable **table) { if (*table == NULL) { *table = pcmk__strkey_table(NULL, free_remote_op); } } /*! * \internal * \brief Return an operation's originally requested action (before any remap) * * \param[in] op Operation to check * * \return Operation's original action */ static const char * op_requested_action(const remote_fencing_op_t *op) { return ((op->phase > st_phase_requested)? PCMK_ACTION_REBOOT : op->action); } /*! * \internal * \brief Remap a "reboot" operation to the "off" phase * * \param[in,out] op Operation to remap */ static void op_phase_off(remote_fencing_op_t *op) { crm_info("Remapping multiple-device reboot targeting %s to 'off' " QB_XS " id=%.8s", op->target, op->id); op->phase = st_phase_off; /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the * memory allocation at each phase. */ strcpy(op->action, PCMK_ACTION_OFF); } /*! * \internal * \brief Advance a remapped reboot operation to the "on" phase * * \param[in,out] op Operation to remap */ static void op_phase_on(remote_fencing_op_t *op) { GList *iter = NULL; crm_info("Remapped 'off' targeting %s complete, " "remapping to 'on' for %s " QB_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_on; strcpy(op->action, PCMK_ACTION_ON); /* Skip devices with automatic unfencing, because the cluster will handle it * when the node rejoins. 
*/ for (iter = op->automatic_list; iter != NULL; iter = iter->next) { GList *match = g_list_find_custom(op->devices_list, iter->data, sort_strings); if (match) { op->devices_list = g_list_remove(op->devices_list, match->data); } } g_list_free_full(op->automatic_list, free); op->automatic_list = NULL; /* Rewind device list pointer */ op->devices = op->devices_list; } /*! * \internal * \brief Reset a remapped reboot operation * * \param[in,out] op Operation to reset */ static void undo_op_remap(remote_fencing_op_t *op) { if (op->phase > 0) { crm_info("Undoing remap of reboot targeting %s for %s " QB_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_requested; strcpy(op->action, PCMK_ACTION_REBOOT); } } /*! * \internal * \brief Create notification data XML for a fencing operation result * * \param[in,out] parent Parent XML element for newly created element * \param[in] op Fencer operation that completed * * \return Newly created XML to add as notification data * \note The caller is responsible for freeing the result. */ static xmlNode * fencing_result2xml(xmlNode *parent, const remote_fencing_op_t *op) { xmlNode *notify_data = pcmk__xe_create(parent, PCMK__XE_ST_NOTIFY_FENCE); crm_xml_add_int(notify_data, PCMK_XA_STATE, op->state); crm_xml_add(notify_data, PCMK__XA_ST_TARGET, op->target); crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ACTION, op->action); crm_xml_add(notify_data, PCMK__XA_ST_DELEGATE, op->delegate); crm_xml_add(notify_data, PCMK__XA_ST_REMOTE_OP, op->id); crm_xml_add(notify_data, PCMK__XA_ST_ORIGIN, op->originator); crm_xml_add(notify_data, PCMK__XA_ST_CLIENTID, op->client_id); crm_xml_add(notify_data, PCMK__XA_ST_CLIENTNAME, op->client_name); return notify_data; } /*! * \internal * \brief Broadcast a fence result notification to all CPG peers * * \param[in] op Fencer operation that completed * \param[in] op_merged Whether this operation is a duplicate of another */ void fenced_broadcast_op_result(const remote_fencing_op_t *op, bool op_merged) { static int count = 0; xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); xmlNode *wrapper = NULL; xmlNode *notify_data = NULL; count++; crm_trace("Broadcasting result to peers"); crm_xml_add(bcast, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); crm_xml_add(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); crm_xml_add(bcast, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); crm_xml_add_int(bcast, PCMK_XA_COUNT, count); if (op_merged) { pcmk__xe_set_bool_attr(bcast, PCMK__XA_ST_OP_MERGED, true); } wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA); notify_data = fencing_result2xml(wrapper, op); stonith__xe_set_result(notify_data, &op->result); pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, bcast); pcmk__xml_free(bcast); return; } /*! 
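When a remapped reboot reaches the "on" phase, op_phase_on() prunes devices flagged for automatic unfencing from the remaining device list with g_list_find_custom() and g_list_remove(). A standalone sketch of that pruning pattern; the device names are hypothetical:

#include <glib.h>
#include <stdio.h>
#include <string.h>

static gint cmp_str(gconstpointer a, gconstpointer b)
{
    return strcmp(a, b);
}

int main(void)
{
    // Hypothetical lists standing in for op->devices_list and
    // op->automatic_list.
    GList *devices = NULL;
    GList *automatic = NULL;

    devices = g_list_append(devices, "fence_psu");
    devices = g_list_append(devices, "fence_scsi");
    automatic = g_list_append(automatic, "fence_scsi");

    // Drop every automatic-unfencing device from the remaining device list.
    for (GList *iter = automatic; iter != NULL; iter = iter->next) {
        GList *match = g_list_find_custom(devices, iter->data, cmp_str);

        if (match != NULL) {
            devices = g_list_remove(devices, match->data);
        }
    }

    for (GList *iter = devices; iter != NULL; iter = iter->next) {
        printf("still queued: %s\n", (const char *) iter->data); // fence_psu
    }

    g_list_free(devices);
    g_list_free(automatic);
    return 0;
}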
* \internal * \brief Reply to a local request originator and notify all subscribed clients * * \param[in,out] op Fencer operation that completed * \param[in,out] data Top-level XML to add notification to */ static void handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data) { xmlNode *notify_data = NULL; xmlNode *reply = NULL; pcmk__client_t *client = NULL; if (op->notify_sent == TRUE) { /* nothing to do */ return; } /* Do notification with a clean data object */ crm_xml_add_int(data, PCMK_XA_STATE, op->state); crm_xml_add(data, PCMK__XA_ST_TARGET, op->target); crm_xml_add(data, PCMK__XA_ST_OP, op->action); reply = fenced_construct_reply(op->request, data, &op->result); crm_xml_add(reply, PCMK__XA_ST_DELEGATE, op->delegate); /* Send fencing OP reply to local client that initiated fencing */ client = pcmk__find_client_by_id(op->client_id); if (client == NULL) { crm_trace("Skipping reply to %s: no longer a client", op->client_id); } else { do_local_reply(reply, client, op->call_options); } /* Broadcast to all local clients that the fencing operation happened */ notify_data = fencing_result2xml(NULL, op); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_FENCE, &op->result, notify_data); pcmk__xml_free(notify_data); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); /* mark this op as having notifications already sent */ op->notify_sent = TRUE; pcmk__xml_free(reply); } /*! * \internal * \brief Finalize all duplicates of a given fencer operation * * \param[in,out] op Fencer operation that completed * \param[in,out] data Top-level XML to add notification to */ static void finalize_op_duplicates(remote_fencing_op_t *op, xmlNode *data) { for (GList *iter = op->duplicates; iter != NULL; iter = iter->next) { remote_fencing_op_t *other = iter->data; if (other->state == st_duplicate) { other->state = op->state; crm_debug("Performing duplicate notification for %s@%s: %s " QB_XS " id=%.8s", other->client_name, other->originator, pcmk_exec_status_str(op->result.execution_status), other->id); pcmk__copy_result(&op->result, &other->result); finalize_op(other, data, true); } else { // Possible if (for example) it timed out already crm_err("Skipping duplicate notification for %s@%s " QB_XS " state=%s id=%.8s", other->client_name, other->originator, stonith_op_state_str(other->state), other->id); } } } static char * delegate_from_xml(xmlNode *xml) { xmlNode *match = pcmk__xpath_find_one(xml->doc, "//*[@" PCMK__XA_ST_DELEGATE "]", LOG_NEVER); if (match == NULL) { - return crm_element_value_copy(xml, PCMK__XA_SRC); + return pcmk__xe_get_copy(xml, PCMK__XA_SRC); } else { - return crm_element_value_copy(match, PCMK__XA_ST_DELEGATE); + return pcmk__xe_get_copy(match, PCMK__XA_ST_DELEGATE); } } /*! * \internal * \brief Finalize a peer fencing operation * * Clean up after a fencing operation completes. This function has two code * paths: the executioner uses it to broadcast the result to CPG peers, and then * each peer (including the executioner) uses it to process that broadcast and * notify its IPC clients of the result. * * \param[in,out] op Fencer operation that completed * \param[in,out] data If not NULL, XML reply of last delegated operation * \param[in] dup Whether this operation is a duplicate of another * (in which case, do not broadcast the result) * * \note The operation result should be set before calling this function.
*/ static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup) { int level = LOG_ERR; const char *subt = NULL; xmlNode *local_data = NULL; gboolean op_merged = FALSE; CRM_CHECK((op != NULL), return); // This is a no-op if timers have already been cleared clear_remote_op_timers(op); if (op->notify_sent) { // Most likely, this is a timed-out action that eventually completed crm_notice("Operation '%s'%s%s by %s for %s@%s%s: " "Result arrived too late " QB_XS " id=%.8s", op->action, (op->target? " targeting " : ""), (op->target? op->target : ""), (op->delegate? op->delegate : "unknown node"), op->client_name, op->originator, (op_merged? " (merged)" : ""), op->id); return; } set_fencing_completed(op); undo_op_remap(op); if (data == NULL) { data = pcmk__xe_create(NULL, "remote-op"); local_data = data; } else if (op->delegate == NULL) { switch (op->result.execution_status) { case PCMK_EXEC_NO_FENCE_DEVICE: break; case PCMK_EXEC_INVALID: if (op->result.exit_status != CRM_EX_EXPIRED) { op->delegate = delegate_from_xml(data); } break; default: op->delegate = delegate_from_xml(data); break; } } if (dup || (pcmk__xe_get(data, PCMK__XA_ST_OP_MERGED) != NULL)) { op_merged = true; } /* Tell everyone the operation is done, we will continue * with doing the local notifications once we receive * the broadcast back. */ subt = pcmk__xe_get(data, PCMK__XA_SUBT); if (!dup && !pcmk__str_eq(subt, PCMK__VALUE_BROADCAST, pcmk__str_none)) { /* Defer notification until the bcast message arrives */ fenced_broadcast_op_result(op, op_merged); pcmk__xml_free(local_data); return; } if (pcmk__result_ok(&op->result) || dup || !pcmk__str_eq(op->originator, fenced_get_local_node(), pcmk__str_casei)) { level = LOG_NOTICE; } do_crm_log(level, "Operation '%s'%s%s by %s for %s@%s%s: %s (%s%s%s) " QB_XS " id=%.8s", op->action, (op->target? " targeting " : ""), (op->target? op->target : ""), (op->delegate? op->delegate : "unknown node"), op->client_name, op->originator, (op_merged? " (merged)" : ""), crm_exit_str(op->result.exit_status), pcmk_exec_status_str(op->result.execution_status), ((op->result.exit_reason == NULL)? "" : ": "), ((op->result.exit_reason == NULL)? "" : op->result.exit_reason), op->id); handle_local_reply_and_notify(op, data); if (!dup) { finalize_op_duplicates(op, data); } /* Free non-essential parts of the record * Keep the record around so we can query the history */ if (op->query_results) { g_list_free_full(op->query_results, free_remote_query); op->query_results = NULL; } if (op->request) { pcmk__xml_free(op->request); op->request = NULL; } pcmk__xml_free(local_data); } /*! 
* \internal * \brief Finalize a watchdog fencer op after the waiting time expires * * \param[in,out] userdata Fencer operation that completed * * \return G_SOURCE_REMOVE (which tells glib not to restart timer) */ static gboolean remote_op_watchdog_done(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_one = 0; crm_notice("Self-fencing (%s) by %s for %s assumed complete " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); op->state = st_done; pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); finalize_op(op, NULL, false); return G_SOURCE_REMOVE; } static gboolean remote_op_timeout_one(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_one = 0; crm_notice("Peer's '%s' action targeting %s for client %s timed out " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, "Peer did not return fence result within timeout"); // The requested delay has been applied for the first device if (op->client_delay > 0) { op->client_delay = 0; crm_trace("Try another device for '%s' action targeting %s " "for client %s without delay " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); } // Try another device, if appropriate request_peer_fencing(op, NULL); return G_SOURCE_REMOVE; } /*! * \internal * \brief Finalize a remote fencer operation that timed out * * \param[in,out] op Fencer operation that timed out * \param[in] reason Readable description of what step timed out */ static void finalize_timed_out_op(remote_fencing_op_t *op, const char *reason) { crm_debug("Action '%s' targeting %s for client %s timed out " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); if (op->phase == st_phase_on) { /* A remapped reboot operation timed out in the "on" phase, but the * "off" phase completed successfully, so quit trying any further * devices, and return success. */ op->state = st_done; pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { op->state = st_failed; pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, reason); } finalize_op(op, NULL, false); } /*! 
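remote_op_watchdog_done() and remote_op_timeout_one() are one-shot timer callbacks: they run once and return G_SOURCE_REMOVE so the source is not rescheduled. A minimal standalone illustration of that pattern, assuming pcmk__create_timer() is a thin wrapper around a GLib timeout source:

#include <glib.h>
#include <stdio.h>

// One-shot timer callback: returning G_SOURCE_REMOVE tells GLib not to
// reschedule it, like the remote_op_*() callbacks do.
static gboolean on_timeout(gpointer user_data)
{
    GMainLoop *loop = user_data;

    printf("timer fired once\n");
    g_main_loop_quit(loop);
    return G_SOURCE_REMOVE;
}

int main(void)
{
    GMainLoop *loop = g_main_loop_new(NULL, FALSE);

    // 1000 ms one-shot timer (the interval is arbitrary for this sketch).
    g_timeout_add(1000, on_timeout, loop);
    g_main_loop_run(loop);
    g_main_loop_free(loop);
    return 0;
}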
* \internal * \brief Finalize a remote fencer operation that timed out * * \param[in,out] userdata Fencer operation that timed out * * \return G_SOURCE_REMOVE (which tells glib not to restart timer) */ static gboolean remote_op_timeout(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_total = 0; if (op->state == st_done) { crm_debug("Action '%s' targeting %s for client %s already completed " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); } else { finalize_timed_out_op(userdata, "Fencing did not complete within a " "total timeout based on the " "configured timeout and retries for " "any devices attempted"); } return G_SOURCE_REMOVE; } static gboolean remote_op_query_timeout(gpointer data) { remote_fencing_op_t *op = data; op->query_timer = 0; if (op->state == st_done) { crm_debug("Operation %.8s targeting %s already completed", op->id, op->target); } else if (op->state == st_exec) { crm_debug("Operation %.8s targeting %s already in progress", op->id, op->target); } else if (op->query_results) { // Query succeeded, so attempt the actual fencing crm_debug("Query %.8s targeting %s complete (state=%s)", op->id, op->target, stonith_op_state_str(op->state)); request_peer_fencing(op, NULL); } else { crm_debug("Query %.8s targeting %s timed out (state=%s)", op->id, op->target, stonith_op_state_str(op->state)); finalize_timed_out_op(op, "No capable peers replied to device query " "within timeout"); } return G_SOURCE_REMOVE; } static gboolean topology_is_empty(stonith_topology_t *tp) { int i; if (tp == NULL) { return TRUE; } for (i = 0; i < ST__LEVEL_COUNT; i++) { if (tp->levels[i] != NULL) { return FALSE; } } return TRUE; } /*! * \internal * \brief Add a device to an operation's automatic unfencing list * * \param[in,out] op Operation to modify * \param[in] device Device ID to add */ static void add_required_device(remote_fencing_op_t *op, const char *device) { GList *match = g_list_find_custom(op->automatic_list, device, sort_strings); if (!match) { op->automatic_list = g_list_prepend(op->automatic_list, pcmk__str_copy(device)); } } /*! * \internal * \brief Remove a device from the automatic unfencing list * * \param[in,out] op Operation to modify * \param[in] device Device ID to remove */ static void remove_required_device(remote_fencing_op_t *op, const char *device) { GList *match = g_list_find_custom(op->automatic_list, device, sort_strings); if (match) { op->automatic_list = g_list_remove(op->automatic_list, match->data); } } /* deep copy the device list */ static void set_op_device_list(remote_fencing_op_t * op, GList *devices) { GList *lpc = NULL; if (op->devices_list) { g_list_free_full(op->devices_list, free); op->devices_list = NULL; } for (lpc = devices; lpc != NULL; lpc = lpc->next) { const char *device = lpc->data; op->devices_list = g_list_append(op->devices_list, pcmk__str_copy(device)); } op->devices = op->devices_list; } /*! * \internal * \brief Check whether a node matches a topology target * * \param[in] tp Topology table entry to check * \param[in] node Name of node to check * * \return TRUE if node matches topology target */ static gboolean topology_matches(const stonith_topology_t *tp, const char *node) { regex_t r_patt; CRM_CHECK(node && tp && tp->target, return FALSE); switch (tp->kind) { case fenced_target_by_attribute: /* This level targets by attribute, so tp->target is a NAME=VALUE pair * of a permanent attribute applied to targeted nodes. 
The test below * relies on the locally cached copy of the CIB, so if fencing needs to * be done before the initial CIB is received or after a malformed CIB * is received, then the topology will be unable to be used. */ if (node_has_attr(node, tp->target_attribute, tp->target_value)) { crm_notice("Matched %s with %s by attribute", node, tp->target); return TRUE; } break; case fenced_target_by_pattern: /* This level targets node names matching a pattern, so tp->target * (and tp->target_pattern) is a regular expression. */ if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) { crm_info("Bad regex '%s' for fencing level", tp->target); } else { int status = regexec(&r_patt, node, 0, NULL, 0); regfree(&r_patt); if (status == 0) { crm_notice("Matched %s with %s by name", node, tp->target); return TRUE; } } break; case fenced_target_by_name: crm_trace("Testing %s against %s", node, tp->target); return pcmk__str_eq(tp->target, node, pcmk__str_casei); default: break; } crm_trace("No match for %s with %s", node, tp->target); return FALSE; } stonith_topology_t * find_topology_for_host(const char *host) { GHashTableIter tIter; stonith_topology_t *tp = g_hash_table_lookup(topology, host); if(tp != NULL) { crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology)); return tp; } g_hash_table_iter_init(&tIter, topology); while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) { if (topology_matches(tp, host)) { crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology)); return tp; } } crm_trace("No matches for %s in %d topology entries", host, g_hash_table_size(topology)); return NULL; } /*! * \internal * \brief Set fencing operation's device list to target's next topology level * * \param[in,out] op Remote fencing operation to modify * \param[in] empty_ok If true, an operation without a target (i.e. * queries) or a target without a topology will get a * pcmk_rc_ok return value instead of ENODEV * * \return Standard Pacemaker return value */ static int advance_topology_level(remote_fencing_op_t *op, bool empty_ok) { stonith_topology_t *tp = NULL; if (op->target) { tp = find_topology_for_host(op->target); } if (topology_is_empty(tp)) { return empty_ok? pcmk_rc_ok : ENODEV; } pcmk__assert(tp->levels != NULL); stonith__set_call_options(op->call_options, op->id, st_opt_topology); /* This is a new level, so undo any remapping left over from previous */ undo_op_remap(op); do { op->level++; } while (op->level < ST__LEVEL_COUNT && tp->levels[op->level] == NULL); if (op->level < ST__LEVEL_COUNT) { crm_trace("Attempting fencing level %d targeting %s (%d devices) " "for client %s@%s (id=%.8s)", op->level, op->target, g_list_length(tp->levels[op->level]), op->client_name, op->originator, op->id); set_op_device_list(op, tp->levels[op->level]); // The requested delay has been applied for the first fencing level if ((op->level > 1) && (op->client_delay > 0)) { op->client_delay = 0; } if ((g_list_next(op->devices_list) != NULL) && pcmk__str_eq(op->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A reboot has been requested for a topology level with multiple * devices. Instead of rebooting the devices sequentially, we will * turn them all off, then turn them all on again. (Think about * switched power outlets for redundant power supplies.) 
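For fenced_target_by_pattern levels, topology_matches() compiles the target as a POSIX extended regular expression (REG_EXTENDED|REG_NOSUB) and tests the node name with regexec(). A standalone sketch of that matching; the pattern and node names are hypothetical:

#include <regex.h>
#include <stdio.h>

// Match a node name against a topology pattern the way topology_matches()
// does for pattern-based levels.
static int pattern_matches(const char *pattern, const char *node)
{
    regex_t r_patt;
    int status;

    if (regcomp(&r_patt, pattern, REG_EXTENDED | REG_NOSUB) != 0) {
        return 0; // bad pattern: treat as no match (the fencer logs and skips)
    }
    status = regexec(&r_patt, node, 0, NULL, 0);
    regfree(&r_patt);
    return (status == 0);
}

int main(void)
{
    // Hypothetical pattern and node names, for illustration only.
    printf("%d\n", pattern_matches("^node-[0-9]+$", "node-3")); // 1
    printf("%d\n", pattern_matches("^node-[0-9]+$", "db01"));   // 0
    return 0;
}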
*/ op_phase_off(op); } return pcmk_rc_ok; } crm_info("All %sfencing options targeting %s for client %s@%s failed " QB_XS " id=%.8s", (stonith_watchdog_timeout_ms > 0)?"non-watchdog ":"", op->target, op->client_name, op->originator, op->id); return ENODEV; } /*! * \internal * \brief If fencing operation is a duplicate, merge it into the other one * * \param[in,out] op Fencing operation to check */ static void merge_duplicates(remote_fencing_op_t *op) { GHashTableIter iter; remote_fencing_op_t *other = NULL; time_t now = time(NULL); g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) { const char *other_action = op_requested_action(other); pcmk__node_status_t *node = NULL; if (!strcmp(op->id, other->id)) { continue; // Don't compare against self } if (other->state > st_exec) { crm_trace("%.8s not duplicate of %.8s: not in progress", op->id, other->id); continue; } if (!pcmk__str_eq(op->target, other->target, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: node %s vs. %s", op->id, other->id, op->target, other->target); continue; } if (!pcmk__str_eq(op->action, other_action, pcmk__str_none)) { crm_trace("%.8s not duplicate of %.8s: action %s vs. %s", op->id, other->id, op->action, other_action); continue; } if (pcmk__str_eq(op->client_name, other->client_name, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: same client %s", op->id, other->id, op->client_name); continue; } if (pcmk__str_eq(other->target, other->originator, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: self-fencing for %s", op->id, other->id, other->target); continue; } node = pcmk__get_node(0, other->originator, NULL, pcmk__node_search_cluster_member); if (!fencing_peer_active(node)) { crm_notice("Failing action '%s' targeting %s originating from " "client %s@%s: Originator is dead " QB_XS " id=%.8s", other->action, other->target, other->client_name, other->originator, other->id); crm_trace("%.8s not duplicate of %.8s: originator dead", op->id, other->id); other->state = st_failed; continue; } if ((other->total_timeout > 0) && (now > (other->total_timeout + other->created))) { crm_trace("%.8s not duplicate of %.8s: old (%lld vs. %lld + %ds)", op->id, other->id, (long long)now, (long long)other->created, other->total_timeout); continue; } /* There is another in-flight request to fence the same host * Piggyback on that instead. If it fails, so do we. */ other->duplicates = g_list_append(other->duplicates, op); if (other->total_timeout == 0) { other->total_timeout = op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL); crm_trace("Best guess as to timeout used for %.8s: %ds", other->id, other->total_timeout); } crm_notice("Merging fencing action '%s' targeting %s originating from " "client %s with identical request from %s@%s " QB_XS " original=%.8s duplicate=%.8s total_timeout=%ds", op->action, op->target, op->client_name, other->client_name, other->originator, op->id, other->id, other->total_timeout); report_timeout_period(op, other->total_timeout); op->state = st_duplicate; } } static uint32_t fencing_active_peers(void) { uint32_t count = 0; pcmk__node_status_t *entry = NULL; GHashTableIter gIter; g_hash_table_iter_init(&gIter, pcmk__peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { if(fencing_peer_active(entry)) { count++; } } return count; } /*! 
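advance_topology_level() walks the per-target level table with a do/while loop that skips unconfigured (NULL) levels until it finds one with devices or runs out of levels. A stripped-down sketch of that walk; the table size and device names are hypothetical stand-ins for tp->levels[]:

#include <stdio.h>
#include <stddef.h>

#define DEMO_LEVEL_COUNT 10

// Starting from the current level, keep incrementing until a configured
// (non-NULL) level is found or the table is exhausted (returns -1).
static int next_configured_level(const char *levels[], int current)
{
    do {
        current++;
    } while ((current < DEMO_LEVEL_COUNT) && (levels[current] == NULL));

    return (current < DEMO_LEVEL_COUNT)? current : -1;
}

int main(void)
{
    // Hypothetical topology: only levels 2 and 5 are configured.
    const char *levels[DEMO_LEVEL_COUNT] = { NULL };
    int level = 0;

    levels[2] = "fence_ipmi";
    levels[5] = "fence_psu";

    while ((level = next_configured_level(levels, level)) != -1) {
        printf("trying level %d (%s)\n", level, levels[level]);
    }
    return 0;
}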
* \internal * \brief Process a manual confirmation of a pending fence action * * \param[in] client IPC client that sent confirmation * \param[in,out] msg Request XML with manual confirmation * * \return Standard Pacemaker return code */ int fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg) { remote_fencing_op_t *op = NULL; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_ERR); CRM_CHECK(dev != NULL, return EPROTO); crm_notice("Received manual confirmation that %s has been fenced", pcmk__s(pcmk__xe_get(dev, PCMK__XA_ST_TARGET), "unknown target")); op = initiate_remote_stonith_op(client, msg, TRUE); if (op == NULL) { return EPROTO; } op->state = st_done; op->delegate = pcmk__str_copy("a human"); // For the fencer's purposes, the fencing operation is done pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); finalize_op(op, msg, false); /* For the requester's purposes, the operation is still pending. The * actual result will be sent asynchronously via the operation's done_cb(). */ return EINPROGRESS; } /*! * \internal * \brief Create a new remote stonith operation * * \param[in] client ID of local stonith client that initiated the operation * \param[in] request The request from the client that started the operation * \param[in] peer TRUE if this operation is owned by another stonith peer * (an operation owned by one peer is stored on all peers, * but only the owner executes it; all nodes get the results * once the owner finishes execution) */ void * create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) { remote_fencing_op_t *op = NULL; xmlNode *dev = pcmk__xpath_find_one(request->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_NEVER); int rc = pcmk_rc_ok; const char *operation = NULL; init_stonith_remote_op_hash_table(&stonith_remote_op_list); /* If this operation is owned by another node, check to make * sure we haven't already created this operation. */ if (peer && dev) { const char *op_id = pcmk__xe_get(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(op_id != NULL, return NULL); op = g_hash_table_lookup(stonith_remote_op_list, op_id); if (op) { crm_debug("Reusing existing remote fencing op %.8s for %s", op_id, ((client == NULL)? "unknown client" : client)); return op; } } op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t)); pcmk__xe_get_int(request, PCMK__XA_ST_TIMEOUT, &(op->base_timeout)); // Value -1 means disable any static/random fencing delays pcmk__xe_get_int(request, PCMK__XA_ST_DELAY, &(op->client_delay)); if (peer && dev) { - op->id = crm_element_value_copy(dev, PCMK__XA_ST_REMOTE_OP); + op->id = pcmk__xe_get_copy(dev, PCMK__XA_ST_REMOTE_OP); } else { op->id = crm_generate_uuid(); } g_hash_table_replace(stonith_remote_op_list, op->id, op); op->state = st_query; op->replies_expected = fencing_active_peers(); - op->action = crm_element_value_copy(dev, PCMK__XA_ST_DEVICE_ACTION); + op->action = pcmk__xe_get_copy(dev, PCMK__XA_ST_DEVICE_ACTION); /* The node initiating the stonith operation. If an operation is relayed, * this is the last node the operation lands on. When in standalone mode, * origin is the ID of the client that originated the operation. * * Or may be the name of the function that created the operation. 
*/ - op->originator = crm_element_value_copy(dev, PCMK__XA_ST_ORIGIN); + op->originator = pcmk__xe_get_copy(dev, PCMK__XA_ST_ORIGIN); if (op->originator == NULL) { /* Local or relayed request */ op->originator = pcmk__str_copy(fenced_get_local_node()); } // Delegate may not be set - op->delegate = crm_element_value_copy(dev, PCMK__XA_ST_DELEGATE); + op->delegate = pcmk__xe_get_copy(dev, PCMK__XA_ST_DELEGATE); op->created = time(NULL); CRM_LOG_ASSERT(client != NULL); op->client_id = pcmk__str_copy(client); /* For a RELAY operation, set fenced on the client. */ operation = pcmk__xe_get(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { op->client_name = crm_strdup_printf("%s.%lu", crm_system_name, (unsigned long) getpid()); } else { - op->client_name = crm_element_value_copy(request, - PCMK__XA_ST_CLIENTNAME); + op->client_name = pcmk__xe_get_copy(request, PCMK__XA_ST_CLIENTNAME); } - op->target = crm_element_value_copy(dev, PCMK__XA_ST_TARGET); + op->target = pcmk__xe_get_copy(dev, PCMK__XA_ST_TARGET); // @TODO Figure out how to avoid copying XML here op->request = pcmk__xml_copy(NULL, request); rc = pcmk__xe_get_flags(request, PCMK__XA_ST_CALLOPT, &(op->call_options), 0U); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request %s: %s", op->id, pcmk_rc_str(rc)); } pcmk__xe_get_int(request, PCMK__XA_ST_CALLID, &(op->client_callid)); crm_trace("%s new fencing op %s ('%s' targeting %s for client %s, " "base timeout %ds, %u %s expected)", (peer && dev)? "Recorded" : "Generated", op->id, op->action, op->target, op->client_name, op->base_timeout, op->replies_expected, pcmk__plural_alt(op->replies_expected, "reply", "replies")); if (op->call_options & st_opt_cs_nodeid) { int nodeid; pcmk__node_status_t *node = NULL; pcmk__scan_min_int(op->target, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); /* Ensure the conversion only happens once */ stonith__clear_call_options(op->call_options, op->id, st_opt_cs_nodeid); if ((node != NULL) && (node->name != NULL)) { pcmk__str_update(&(op->target), node->name); } else { crm_warn("Could not expand nodeid '%s' into a host name", op->target); } } /* check to see if this is a duplicate operation of another in-flight operation */ merge_duplicates(op); if (op->state != st_duplicate) { /* kick history readers */ fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } /* safe to trim as long as that doesn't touch pending ops */ stonith_fence_history_trim(); return op; } /*! 
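When st_opt_cs_nodeid is set, the numeric target is translated to a node name once and the flag is cleared so the translation is not repeated. A rough standalone sketch of that idea; the fencer itself uses pcmk__scan_min_int() and its node caches, whereas the lookup table below is purely hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct demo_node { long id; const char *name; };

// Hypothetical stand-in for the node caches consulted by the fencer.
static const char *node_name_from_id(long id)
{
    static const struct demo_node cache[] = {
        { 1, "node-a" }, { 2, "node-b" }, { 0, NULL }
    };

    for (int i = 0; cache[i].name != NULL; i++) {
        if (cache[i].id == id) {
            return cache[i].name;
        }
    }
    return NULL;
}

int main(void)
{
    const char *target = "2";
    char *end = NULL;
    long nodeid = strtol(target, &end, 10);

    // Only translate if the whole target parsed as a number.
    if ((end != target) && (*end == '\0')) {
        const char *name = node_name_from_id(nodeid);

        printf("target %s -> %s\n", target,
               (name != NULL)? name : "(unknown, keep numeric target)");
    }
    return 0;
}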
* \internal * \brief Create a peer fencing operation from a request, and initiate it * * \param[in] client IPC client that made request (NULL to get from request) * \param[in] request Request XML * \param[in] manual_ack Whether this is a manual action confirmation * * \return Newly created operation on success, otherwise NULL */ remote_fencing_op_t * initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request, gboolean manual_ack) { int query_timeout = 0; xmlNode *query = NULL; const char *client_id = NULL; remote_fencing_op_t *op = NULL; const char *relay_op_id = NULL; const char *operation = NULL; if (client) { client_id = client->id; } else { client_id = pcmk__xe_get(request, PCMK__XA_ST_CLIENTID); } CRM_LOG_ASSERT(client_id != NULL); op = create_remote_stonith_op(client_id, request, FALSE); op->owner = TRUE; if (manual_ack) { return op; } CRM_CHECK(op->action, return NULL); if (advance_topology_level(op, true) != pcmk_rc_ok) { op->state = st_failed; } switch (op->state) { case st_failed: // advance_topology_level() exhausted levels pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "All topology levels failed"); crm_warn("Could not request peer fencing (%s) targeting %s " QB_XS " id=%.8s", op->action, op->target, op->id); finalize_op(op, NULL, false); return op; case st_duplicate: crm_info("Requesting peer fencing (%s) targeting %s (duplicate) " QB_XS " id=%.8s", op->action, op->target, op->id); return op; default: crm_notice("Requesting peer fencing (%s) targeting %s " QB_XS " id=%.8s state=%s base_timeout=%ds", op->action, op->target, op->id, stonith_op_state_str(op->state), op->base_timeout); } query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY, NULL, op->call_options); crm_xml_add(query, PCMK__XA_ST_REMOTE_OP, op->id); crm_xml_add(query, PCMK__XA_ST_TARGET, op->target); crm_xml_add(query, PCMK__XA_ST_DEVICE_ACTION, op_requested_action(op)); crm_xml_add(query, PCMK__XA_ST_ORIGIN, op->originator); crm_xml_add(query, PCMK__XA_ST_CLIENTID, op->client_id); crm_xml_add(query, PCMK__XA_ST_CLIENTNAME, op->client_name); crm_xml_add_int(query, PCMK__XA_ST_TIMEOUT, op->base_timeout); /* In case of RELAY operation, RELAY information is added to the query to delete the original operation of RELAY. */ operation = pcmk__xe_get(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { relay_op_id = pcmk__xe_get(request, PCMK__XA_ST_REMOTE_OP); if (relay_op_id) { crm_xml_add(query, PCMK__XA_ST_REMOTE_OP_RELAY, relay_op_id); } } pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, query); pcmk__xml_free(query); query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR; op->query_timer = pcmk__create_timer((1000 * query_timeout), remote_op_query_timeout, op); return op; } enum find_best_peer_options { /*! Skip checking the target peer for capable fencing devices */ FIND_PEER_SKIP_TARGET = 0x0001, /*! Only check the target peer for capable fencing devices */ FIND_PEER_TARGET_ONLY = 0x0002, /*! Skip peers and devices that are not verified */ FIND_PEER_VERIFIED_ONLY = 0x0004, }; static bool is_watchdog_fencing(const remote_fencing_op_t *op, const char *device) { return (stonith_watchdog_timeout_ms > 0 // Only an explicit mismatch is considered not a watchdog fencing. 
&& pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_null_matches) && pcmk__is_fencing_action(op->action) && node_does_watchdog_fencing(op->target)); } static peer_device_info_t * find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options) { GList *iter = NULL; gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE; if (!device && pcmk_is_set(op->call_options, st_opt_topology)) { return NULL; } for (iter = op->query_results; iter != NULL; iter = iter->next) { peer_device_info_t *peer = iter->data; crm_trace("Testing result from %s targeting %s with %d device%s: %d %x", peer->host, op->target, peer->ndevices, pcmk__plural_s(peer->ndevices), peer->tried, options); if ((options & FIND_PEER_SKIP_TARGET) && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } if ((options & FIND_PEER_TARGET_ONLY) && !pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } if (pcmk_is_set(op->call_options, st_opt_topology)) { if (grab_peer_device(op, peer, device, verified_devices_only)) { return peer; } } else if (!peer->tried && count_peer_devices(op, peer, verified_devices_only, fenced_support_flag(op->action))) { /* No topology: Use the current best peer */ crm_trace("Simple fencing"); return peer; } } return NULL; } static peer_device_info_t * stonith_choose_peer(remote_fencing_op_t * op) { const char *device = NULL; peer_device_info_t *peer = NULL; uint32_t active = fencing_active_peers(); do { if (op->devices) { device = op->devices->data; crm_trace("Checking for someone to fence (%s) %s using %s", op->action, op->target, device); } else { crm_trace("Checking for someone to fence (%s) %s", op->action, op->target); } /* Best choice is a peer other than the target with verified access */ peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY); if (peer) { crm_trace("Found verified peer %s for %s", peer->host, device?device:""); return peer; } if(op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) { crm_trace("Waiting before looking for unverified devices to fence %s", op->target); return NULL; } /* If no other peer has verified access, next best is unverified access */ peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET); if (peer) { crm_trace("Found best unverified peer %s", peer->host); return peer; } /* If no other peer can do it, last option is self-fencing * (which is never allowed for the "on" phase of a remapped reboot) */ if (op->phase != st_phase_on) { peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY); if (peer) { crm_trace("%s will fence itself", peer->host); return peer; } } /* Try the next fencing level if there is one (unless we're in the "on" * phase of a remapped "reboot", because we ignore errors in that case) */ } while ((op->phase != st_phase_on) && pcmk_is_set(op->call_options, st_opt_topology) && (advance_topology_level(op, false) == pcmk_rc_ok)); /* With a simple watchdog fencing configuration without a topology, * "device" is NULL here. Consider it should be done with watchdog fencing. */ if (is_watchdog_fencing(op, device)) { crm_info("Couldn't contact watchdog-fencing target-node (%s)", op->target); /* check_watchdog_fencing_and_wait will log additional info */ } else { crm_notice("Couldn't find anyone to fence (%s) %s using %s", op->action, op->target, (device? 
device : "any device")); } return NULL; } static int valid_fencing_timeout(int specified_timeout, bool action_specific, const remote_fencing_op_t *op, const char *device) { int timeout = specified_timeout; if (!is_watchdog_fencing(op, device)) { return timeout; } timeout = (int) QB_MIN(QB_MAX(specified_timeout, pcmk__timeout_ms2s(stonith_watchdog_timeout_ms)), INT_MAX); if (timeout > specified_timeout) { if (action_specific) { crm_warn("pcmk_%s_timeout %ds for %s is too short (must be >= " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " "instead", op->action, specified_timeout, device? device : "watchdog", timeout, timeout); } else { crm_warn("Fencing timeout %ds is too short (must be >= " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " "instead", specified_timeout, timeout, timeout); } } return timeout; } static int get_device_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer, const char *device, bool with_delay) { int timeout = op->base_timeout; device_properties_t *props; timeout = valid_fencing_timeout(op->base_timeout, false, op, device); if (!peer || !device) { return timeout; } props = g_hash_table_lookup(peer->devices, device); if (!props) { return timeout; } if (props->custom_action_timeout[op->phase]) { timeout = valid_fencing_timeout(props->custom_action_timeout[op->phase], true, op, device); } // op->client_delay < 0 means disable any static/random fencing delays if (with_delay && (op->client_delay >= 0)) { // delay_base is eventually limited by delay_max timeout += (props->delay_max[op->phase] > 0 ? props->delay_max[op->phase] : props->delay_base[op->phase]); } return timeout; } struct timeout_data { const remote_fencing_op_t *op; const peer_device_info_t *peer; int total_timeout; }; /*! * \internal * \brief Add timeout to a total if device has not been executed yet * * \param[in] key GHashTable key (device ID) * \param[in] value GHashTable value (device properties) * \param[in,out] user_data Timeout data */ static void add_device_timeout(gpointer key, gpointer value, gpointer user_data) { const char *device_id = key; device_properties_t *props = value; struct timeout_data *timeout = user_data; if (!props->executed[timeout->op->phase] && !props->disallowed[timeout->op->phase]) { timeout->total_timeout += get_device_timeout(timeout->op, timeout->peer, device_id, true); } } static int get_peer_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer) { struct timeout_data timeout; timeout.op = op; timeout.peer = peer; timeout.total_timeout = 0; g_hash_table_foreach(peer->devices, add_device_timeout, &timeout); return (timeout.total_timeout? timeout.total_timeout : op->base_timeout); } static int get_op_total_timeout(const remote_fencing_op_t *op, const peer_device_info_t *chosen_peer) { long long total_timeout = 0; stonith_topology_t *tp = find_topology_for_host(op->target); if (pcmk_is_set(op->call_options, st_opt_topology) && tp) { int i; GList *device_list = NULL; GList *iter = NULL; GList *auto_list = NULL; if (pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none) && (op->automatic_list != NULL)) { auto_list = g_list_copy(op->automatic_list); } /* Yep, this looks scary, nested loops all over the place. * Here is what is going on. * Loop1: Iterate through fencing levels. * Loop2: If a fencing level has devices, loop through each device * Loop3: For each device in a fencing level, see what peer owns it * and what that peer has reported the timeout is for the device. 
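A stripped-down version of the three nested loops described above, accumulating one timeout per device from the first peer that reported it; the arrays stand in for the topology and query-result structures and all values are hypothetical:

#include <stdio.h>
#include <string.h>

struct demo_reply { const char *peer; const char *device; int timeout_s; };

int main(void)
{
    const char *levels[2][2] = {
        { "fence_ipmi", NULL },        // level 1
        { "fence_psu1", "fence_psu2" } // level 2
    };
    const struct demo_reply replies[] = {
        { "node-a", "fence_ipmi", 20 },
        { "node-b", "fence_psu1", 30 },
        { "node-b", "fence_psu2", 30 },
    };
    int total = 0;

    for (int lvl = 0; lvl < 2; lvl++) {                 // Loop1: levels
        for (int dev = 0; dev < 2; dev++) {             // Loop2: devices
            if (levels[lvl][dev] == NULL) {
                continue;
            }
            for (size_t r = 0; r < 3; r++) {            // Loop3: peer replies
                if (strcmp(replies[r].device, levels[lvl][dev]) == 0) {
                    total += replies[r].timeout_s;
                    break; // first peer that reported the device wins
                }
            }
        }
    }
    printf("total timeout budget: %ds\n", total); // 80
    return 0;
}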
*/ for (i = 0; i < ST__LEVEL_COUNT; i++) { if (!tp->levels[i]) { continue; } for (device_list = tp->levels[i]; device_list; device_list = device_list->next) { bool found = false; for (iter = op->query_results; iter != NULL; iter = iter->next) { const peer_device_info_t *peer = iter->data; if (auto_list) { GList *match = g_list_find_custom(auto_list, device_list->data, sort_strings); if (match) { auto_list = g_list_remove(auto_list, match->data); } } if (find_peer_device(op, peer, device_list->data, fenced_support_flag(op->action))) { total_timeout += get_device_timeout(op, peer, device_list->data, true); found = true; break; } } /* End Loop3: match device with peer that owns device, find device's timeout period */ /* In case of a watchdog device, add its timeout to the budget if we didn't get a reply */ if (!found && is_watchdog_fencing(op, device_list->data)) { total_timeout += pcmk__timeout_ms2s(stonith_watchdog_timeout_ms); } } /* End Loop2: iterate through devices at a specific level */ } /* End Loop1: iterate through fencing levels */ // Add timeouts only for devices remaining in automatic_list if (auto_list) { for (iter = auto_list; iter != NULL; iter = iter->next) { GList *iter2 = NULL; for (iter2 = op->query_results; iter2 != NULL; iter2 = iter2->next) { peer_device_info_t *peer = iter2->data; if (find_peer_device(op, peer, iter->data, st_device_supports_on)) { total_timeout += get_device_timeout(op, peer, iter->data, true); break; } } } } g_list_free(auto_list); } else if (chosen_peer) { total_timeout = get_peer_timeout(op, chosen_peer); } else { total_timeout = valid_fencing_timeout(op->base_timeout, false, op, NULL); } if (total_timeout <= 0) { total_timeout = op->base_timeout; } /* Take any requested fencing delay into account to prevent it from eating * up the total timeout. */ if (op->client_delay > 0) { total_timeout += op->client_delay; } return (int) QB_MIN(total_timeout, INT_MAX); } static void report_timeout_period(remote_fencing_op_t * op, int op_timeout) { GList *iter = NULL; xmlNode *update = NULL; const char *client_node = NULL; const char *client_id = NULL; const char *call_id = NULL; if (op->call_options & st_opt_sync_call) { /* There is no reason to report the timeout for a synchronous call. It * is impossible to use the reported timeout to do anything when the client * is blocking for the response. This update is only important for * async calls that require a callback to report the results in.
*/ return; } else if (!op->request) { return; } crm_trace("Reporting timeout for %s (id=%.8s)", op->client_name, op->id); client_node = pcmk__xe_get(op->request, PCMK__XA_ST_CLIENTNODE); call_id = pcmk__xe_get(op->request, PCMK__XA_ST_CALLID); client_id = pcmk__xe_get(op->request, PCMK__XA_ST_CLIENTID); if (!client_node || !call_id || !client_id) { return; } if (pcmk__str_eq(client_node, fenced_get_local_node(), pcmk__str_casei)) { // Client is connected to this node, so send update directly to them do_stonith_async_timeout_update(client_id, call_id, op_timeout); return; } /* The client is connected to another node, relay this update to them */ update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0); crm_xml_add(update, PCMK__XA_ST_REMOTE_OP, op->id); crm_xml_add(update, PCMK__XA_ST_CLIENTID, client_id); crm_xml_add(update, PCMK__XA_ST_CALLID, call_id); crm_xml_add_int(update, PCMK__XA_ST_TIMEOUT, op_timeout); pcmk__cluster_send_message(pcmk__get_node(0, client_node, NULL, pcmk__node_search_cluster_member), pcmk_ipc_fenced, update); pcmk__xml_free(update); for (iter = op->duplicates; iter != NULL; iter = iter->next) { remote_fencing_op_t *dup = iter->data; crm_trace("Reporting timeout for duplicate %.8s to client %s", dup->id, dup->client_name); report_timeout_period(iter->data, op_timeout); } } /*! * \internal * \brief Advance an operation to the next device in its topology * * \param[in,out] op Fencer operation to advance * \param[in] device ID of device that just completed * \param[in,out] msg If not NULL, XML reply of last delegated operation */ static void advance_topology_device_in_level(remote_fencing_op_t *op, const char *device, xmlNode *msg) { /* Advance to the next device at this topology level, if any */ if (op->devices) { op->devices = op->devices->next; } /* Handle automatic unfencing if an "on" action was requested */ if ((op->phase == st_phase_requested) && pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)) { /* If the device we just executed was required, it's not anymore */ remove_required_device(op, device); /* If there are no more devices at this topology level, run through any * remaining devices with automatic unfencing */ if (op->devices == NULL) { op->devices = op->automatic_list; } } if ((op->devices == NULL) && (op->phase == st_phase_off)) { /* We're done with this level and with required devices, but we had * remapped "reboot" to "off", so start over with "on". If any devices * need to be turned back on, op->devices will be non-NULL after this. 
*/ op_phase_on(op); } // This function is only called if the previous device succeeded pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (op->devices) { /* Necessary devices remain, so execute the next one */ crm_trace("Next targeting %s on behalf of %s@%s", op->target, op->client_name, op->originator); // The requested delay has been applied for the first device if (op->client_delay > 0) { op->client_delay = 0; } request_peer_fencing(op, NULL); } else { /* We're done with all devices and phases, so finalize operation */ crm_trace("Marking complex fencing op targeting %s as complete", op->target); op->state = st_done; finalize_op(op, msg, false); } } static gboolean check_watchdog_fencing_and_wait(remote_fencing_op_t * op) { if (node_does_watchdog_fencing(op->target)) { guint timeout_ms = QB_MIN(stonith_watchdog_timeout_ms, UINT_MAX); crm_notice("Waiting %s for %s to self-fence (%s) for " "client %s " QB_XS " id=%.8s", pcmk__readable_interval(timeout_ms), op->target, op->action, op->client_name, op->id); if (op->op_timer_one) { g_source_remove(op->op_timer_one); } op->op_timer_one = pcmk__create_timer(timeout_ms, remote_op_watchdog_done, op); return TRUE; } else { crm_debug("Skipping fallback to watchdog-fencing as %s is " "not in host-list", op->target); } return FALSE; } /*! * \internal * \brief Ask a peer to execute a fencing operation * * \param[in,out] op Fencing operation to be executed * \param[in,out] peer If NULL or topology is in use, choose best peer to * execute the fencing, otherwise use this peer */ static void request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) { const char *device = NULL; int timeout; CRM_CHECK(op != NULL, return); crm_trace("Action %.8s targeting %s for %s is %s", op->id, op->target, op->client_name, stonith_op_state_str(op->state)); if ((op->phase == st_phase_on) && (op->devices != NULL)) { /* We are in the "on" phase of a remapped topology reboot. If this * device has pcmk_reboot_action="off", or doesn't support the "on" * action, skip it. * * We can't check device properties at this point because we haven't * chosen a peer for this stage yet. Instead, we check the local node's * knowledge about the device. If different versions of the fence agent * are installed on different nodes, there's a chance this could be * mistaken, but the worst that could happen is we don't try turning the * node back on when we should. 
*/ device = op->devices->data; if (pcmk__str_eq(fenced_device_reboot_action(device), PCMK_ACTION_OFF, pcmk__str_none)) { crm_info("Not turning %s back on using %s because the device is " "configured to stay off (pcmk_reboot_action='off')", op->target, device); advance_topology_device_in_level(op, device, NULL); return; } if (!fenced_device_supports_on(device)) { crm_info("Not turning %s back on using %s because the agent " "doesn't support 'on'", op->target, device); advance_topology_device_in_level(op, device, NULL); return; } } timeout = op->base_timeout; if ((peer == NULL) && !pcmk_is_set(op->call_options, st_opt_topology)) { peer = stonith_choose_peer(op); } if (!op->op_timer_total) { op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, peer); op->op_timer_total = pcmk__create_timer(1000 * op->total_timeout, remote_op_timeout, op); report_timeout_period(op, op->total_timeout); crm_info("Total timeout set to %ds for peer's fencing targeting %s for %s " QB_XS " id=%.8s", op->total_timeout, op->target, op->client_name, op->id); } if (pcmk_is_set(op->call_options, st_opt_topology) && op->devices) { /* Ignore the caller's peer preference if topology is in use, because * that peer might not have access to the required device. With * topology, stonith_choose_peer() removes the device from further * consideration, so the timeout must be calculated beforehand. * * @TODO Basing the total timeout on the caller's preferred peer (above) * is less than ideal. */ peer = stonith_choose_peer(op); device = op->devices->data; /* Fencing timeout sent to peer takes no delay into account. * The peer will add a dedicated timer for any delay upon * schedule_stonith_command(). */ timeout = get_device_timeout(op, peer, device, false); } if (peer) { int timeout_one = 0; xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0); const pcmk__node_status_t *peer_node = pcmk__get_node(0, peer->host, NULL, pcmk__node_search_cluster_member); if (op->client_delay > 0) { /* Take requested fencing delay into account to prevent it from * eating up the timeout. 
*/ timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay; } crm_xml_add(remote_op, PCMK__XA_ST_REMOTE_OP, op->id); crm_xml_add(remote_op, PCMK__XA_ST_TARGET, op->target); crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ACTION, op->action); crm_xml_add(remote_op, PCMK__XA_ST_ORIGIN, op->originator); crm_xml_add(remote_op, PCMK__XA_ST_CLIENTID, op->client_id); crm_xml_add(remote_op, PCMK__XA_ST_CLIENTNAME, op->client_name); crm_xml_add_int(remote_op, PCMK__XA_ST_TIMEOUT, timeout); crm_xml_add_int(remote_op, PCMK__XA_ST_CALLOPT, op->call_options); crm_xml_add_int(remote_op, PCMK__XA_ST_DELAY, op->client_delay); if (device) { timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_device_timeout(op, peer, device, true); crm_notice("Requesting that %s perform '%s' action targeting %s " "using %s " QB_XS " for client %s (%ds)", peer->host, op->action, op->target, device, op->client_name, timeout_one); crm_xml_add(remote_op, PCMK__XA_ST_DEVICE_ID, device); } else { timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer); crm_notice("Requesting that %s perform '%s' action targeting %s " QB_XS " for client %s (%ds, %s)", peer->host, op->action, op->target, op->client_name, timeout_one, pcmk__readable_interval(stonith_watchdog_timeout_ms)); } op->state = st_exec; if (op->op_timer_one) { g_source_remove(op->op_timer_one); op->op_timer_one = 0; } if (!is_watchdog_fencing(op, device) || !check_watchdog_fencing_and_wait(op)) { /* Some thoughts about self-fencing cases reaching this point: - Actually check in check_watchdog_fencing_and_wait shouldn't fail if STONITH_WATCHDOG_ID is chosen as fencing-device and it being present implies watchdog-fencing is enabled anyway - If watchdog-fencing is disabled either in general or for a specific target - detected in check_watchdog_fencing_and_wait - for some other kind of self-fencing we can't expect a success answer but timeout is fine if the node doesn't come back in between - Delicate might be the case where we have watchdog-fencing enabled for a node but the watchdog-fencing-device isn't explicitly chosen for self-fencing. Local scheduler execution in sbd might detect the node as unclean and lead to timely self-fencing. Otherwise the selection of PCMK_OPT_STONITH_WATCHDOG_TIMEOUT at least is questionable. */ /* coming here we're not waiting for watchdog timeout - thus engage timer with timout evaluated before */ op->op_timer_one = pcmk__create_timer((1000 * timeout_one), remote_op_timeout_one, op); } pcmk__cluster_send_message(peer_node, pcmk_ipc_fenced, remote_op); peer->tried = TRUE; pcmk__xml_free(remote_op); return; } else if (op->phase == st_phase_on) { /* A remapped "on" cannot be executed, but the node was already * turned off successfully, so ignore the error and continue. 
*/ crm_warn("Ignoring %s 'on' failure (no capable peers) targeting %s " "after successful 'off'", device, op->target); advance_topology_device_in_level(op, device, NULL); return; } else if (op->owner == FALSE) { crm_err("Fencing (%s) targeting %s for client %s is not ours to control", op->action, op->target, op->client_name); } else if (op->query_timer == 0) { /* We've exhausted all available peers */ crm_info("No remaining peers capable of fencing (%s) %s for client %s " QB_XS " state=%s", op->action, op->target, op->client_name, stonith_op_state_str(op->state)); CRM_CHECK(op->state < st_done, return); finalize_timed_out_op(op, "All nodes failed, or are unable, to " "fence target"); } else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) { /* if the operation never left the query state, * but we have all the expected replies, then no devices * are available to execute the fencing operation. */ if (is_watchdog_fencing(op, device) && check_watchdog_fencing_and_wait(op)) { /* Consider a watchdog fencing targeting an offline node executing * once it starts waiting for the target to self-fence. So that when * the query timer pops, remote_op_query_timeout() considers the * fencing already in progress. */ op->state = st_exec; return; } if (op->state == st_query) { crm_info("No peers (out of %d) have devices capable of fencing " "(%s) %s for client %s " QB_XS " state=%s", op->replies, op->action, op->target, op->client_name, stonith_op_state_str(op->state)); pcmk__reset_result(&op->result); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); } else { if (pcmk_is_set(op->call_options, st_opt_topology)) { pcmk__reset_result(&op->result); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); } /* ... else use existing result from previous failed attempt * (topology is not in use, and no devices remain to be attempted). * Overwriting the result with PCMK_EXEC_NO_FENCE_DEVICE would * prevent finalize_op() from setting the correct delegate if * needed. */ crm_info("No peers (out of %d) are capable of fencing (%s) %s " "for client %s " QB_XS " state=%s", op->replies, op->action, op->target, op->client_name, stonith_op_state_str(op->state)); } op->state = st_failed; finalize_op(op, NULL, false); } else { crm_info("Waiting for additional peers capable of fencing (%s) %s%s%s " "for client %s " QB_XS " id=%.8s", op->action, op->target, (device? " using " : ""), (device? device : ""), op->client_name, op->id); } } /*! * \internal * \brief Comparison function for sorting query results * * \param[in] a GList item to compare * \param[in] b GList item to compare * * \return Per the glib documentation, "a negative integer if the first value * comes before the second, 0 if they are equal, or a positive integer * if the first value comes after the second." */ static gint sort_peers(gconstpointer a, gconstpointer b) { const peer_device_info_t *peer_a = a; const peer_device_info_t *peer_b = b; return (peer_b->ndevices - peer_a->ndevices); } /*! 
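Query results are kept ordered by g_list_insert_sorted() with this comparator, so peers reporting the most devices are tried first. A standalone illustration of that ordering; the struct and host names are hypothetical:

#include <glib.h>
#include <stdio.h>

// Stand-in for peer_device_info_t with just the fields the comparator needs.
struct demo_peer { const char *host; int ndevices; };

static gint by_ndevices_desc(gconstpointer a, gconstpointer b)
{
    const struct demo_peer *peer_a = a;
    const struct demo_peer *peer_b = b;

    return peer_b->ndevices - peer_a->ndevices; // more devices sorts first
}

int main(void)
{
    struct demo_peer p1 = { "node-a", 1 };
    struct demo_peer p2 = { "node-b", 3 };
    struct demo_peer p3 = { "node-c", 2 };
    GList *results = NULL;

    results = g_list_insert_sorted(results, &p1, by_ndevices_desc);
    results = g_list_insert_sorted(results, &p2, by_ndevices_desc);
    results = g_list_insert_sorted(results, &p3, by_ndevices_desc);

    for (GList *iter = results; iter != NULL; iter = iter->next) {
        const struct demo_peer *peer = iter->data;

        printf("%s: %d device(s)\n", peer->host, peer->ndevices);
    }
    // Prints node-b, node-c, node-a (most devices first).

    g_list_free(results);
    return 0;
}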
* \internal * \brief Determine if all the devices in the topology are found or not * * \param[in] op Fencing operation with topology to check */ static gboolean all_topology_devices_found(const remote_fencing_op_t *op) { GList *device = NULL; GList *iter = NULL; device_properties_t *match = NULL; stonith_topology_t *tp = NULL; gboolean skip_target = FALSE; int i; tp = find_topology_for_host(op->target); if (!tp) { return FALSE; } if (pcmk__is_fencing_action(op->action)) { /* Don't count the devices on the target node if we are killing * the target node. */ skip_target = TRUE; } for (i = 0; i < ST__LEVEL_COUNT; i++) { for (device = tp->levels[i]; device; device = device->next) { match = NULL; for (iter = op->query_results; iter && !match; iter = iter->next) { peer_device_info_t *peer = iter->data; if (skip_target && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } match = find_peer_device(op, peer, device->data, st_device_supports_none); } if (!match) { return FALSE; } } } return TRUE; } /*! * \internal * \brief Parse action-specific device properties from XML * * \param[in] xml XML element containing the properties * \param[in] peer Name of peer that sent XML (for logs) * \param[in] device Device ID (for logs) * \param[in] action Action the properties relate to (for logs) * \param[in,out] op Fencing operation that properties are being parsed for * \param[in] phase Phase the properties relate to * \param[in,out] props Device properties to update */ static void parse_action_specific(const xmlNode *xml, const char *peer, const char *device, const char *action, remote_fencing_op_t *op, enum st_remap_phase phase, device_properties_t *props) { props->custom_action_timeout[phase] = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_ACTION_TIMEOUT, &props->custom_action_timeout[phase]); if (props->custom_action_timeout[phase]) { crm_trace("Peer %s with device %s returned %s action timeout %ds", peer, device, action, props->custom_action_timeout[phase]); } props->delay_max[phase] = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_DELAY_MAX, &props->delay_max[phase]); if (props->delay_max[phase]) { crm_trace("Peer %s with device %s returned maximum of random delay %ds for %s", peer, device, props->delay_max[phase], action); } props->delay_base[phase] = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_DELAY_BASE, &props->delay_base[phase]); if (props->delay_base[phase]) { crm_trace("Peer %s with device %s returned base delay %ds for %s", peer, device, props->delay_base[phase], action); } /* Handle devices with automatic unfencing */ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { int required = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_REQUIRED, &required); if (required) { crm_trace("Peer %s requires device %s to execute for action %s", peer, device, action); add_required_device(op, device); } } /* If a reboot is remapped to off+on, it's possible that a node is allowed * to perform one action but not another. */ if (pcmk__xe_attr_is_true(xml, PCMK__XA_ST_ACTION_DISALLOWED)) { props->disallowed[phase] = TRUE; crm_trace("Peer %s is disallowed from executing %s for device %s", peer, action, device); } } /*! 
* \internal * \brief Parse one device's properties from peer's XML query reply * * \param[in] xml XML node containing device properties * \param[in,out] op Operation that query and reply relate to * \param[in,out] peer Peer's device information * \param[in] device ID of device being parsed */ static void add_device_properties(const xmlNode *xml, remote_fencing_op_t *op, peer_device_info_t *peer, const char *device) { xmlNode *child; int verified = 0; device_properties_t *props = pcmk__assert_alloc(1, sizeof(device_properties_t)); int rc = pcmk_rc_ok; /* Add a new entry to this peer's devices list */ g_hash_table_insert(peer->devices, pcmk__str_copy(device), props); /* Peers with verified (monitored) access will be preferred */ pcmk__xe_get_int(xml, PCMK__XA_ST_MONITOR_VERIFIED, &verified); if (verified) { crm_trace("Peer %s has confirmed a verified device %s", peer->host, device); props->verified = TRUE; } // Nodes <2.1.5 won't set this, so assume unfencing in that case rc = pcmk__xe_get_flags(xml, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, &(props->device_support_flags), st_device_supports_on); if (rc != pcmk_rc_ok) { crm_warn("Couldn't determine device support for %s " "(assuming unfencing): %s", device, pcmk_rc_str(rc)); } /* Parse action-specific device properties */ parse_action_specific(xml, peer->host, device, op_requested_action(op), op, st_phase_requested, props); for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { /* Replies for "reboot" operations will include the action-specific * values for "off" and "on" in child elements, just in case the reboot * winds up getting remapped. */ if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_OFF, pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF, op, st_phase_off, props); } else if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_ON, pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_ON, op, st_phase_on, props); } } } /*! * \internal * \brief Parse a peer's XML query reply and add it to operation's results * * \param[in,out] op Operation that query and reply relate to * \param[in] host Name of peer that sent this reply * \param[in] ndevices Number of devices expected in reply * \param[in] xml XML node containing device list * * \return Newly allocated result structure with parsed reply */ static peer_device_info_t * add_result(remote_fencing_op_t *op, const char *host, int ndevices, const xmlNode *xml) { peer_device_info_t *peer = pcmk__assert_alloc(1, sizeof(peer_device_info_t)); xmlNode *child; peer->host = pcmk__str_copy(host); peer->devices = pcmk__strkey_table(free, free); /* Each child element describes one capable device available to the peer */ for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { const char *device = pcmk__xe_id(child); if (device) { add_device_properties(child, op, peer, device); } } peer->ndevices = g_hash_table_size(peer->devices); CRM_CHECK(ndevices == peer->ndevices, crm_err("Query claimed to have %d device%s but %d found", ndevices, pcmk__plural_s(ndevices), peer->ndevices)); op->query_results = g_list_insert_sorted(op->query_results, peer, sort_peers); return peer; } /*! * \internal * \brief Handle a peer's reply to our fencing query * * Parse a query result from XML and store it in the remote operation * table, and when enough replies have been received, issue a fencing request. 
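 *
 * As read by the code below, the reply is expected to carry the operation ID
 * in PCMK__XA_ST_REMOTE_OP and a device count in PCMK__XA_ST_AVAILABLE_DEVICES,
 * with one child element per capable device (parsed by add_result()).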
* * \param[in] msg XML reply received * * \return pcmk_ok on success, -errno on error * * \note See initiate_remote_stonith_op() for how the XML query was initially * formed, and stonith_query() for how the peer formed its XML reply. */ int process_remote_stonith_query(xmlNode *msg) { int ndevices = 0; gboolean host_is_target = FALSE; gboolean have_all_replies = FALSE; const char *id = NULL; const char *host = NULL; remote_fencing_op_t *op = NULL; peer_device_info_t *peer = NULL; uint32_t replies_expected; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_REMOTE_OP "]", LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); id = pcmk__xe_get(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return -EPROTO); dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_AVAILABLE_DEVICES "]", LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); pcmk__xe_get_int(dev, PCMK__XA_ST_AVAILABLE_DEVICES, &ndevices); op = g_hash_table_lookup(stonith_remote_op_list, id); if (op == NULL) { crm_debug("Received query reply for unknown or expired operation %s", id); return -EOPNOTSUPP; } replies_expected = fencing_active_peers(); if (op->replies_expected < replies_expected) { replies_expected = op->replies_expected; } if ((++op->replies >= replies_expected) && (op->state == st_query)) { have_all_replies = TRUE; } host = pcmk__xe_get(msg, PCMK__XA_SRC); host_is_target = pcmk__str_eq(host, op->target, pcmk__str_casei); crm_info("Query result %d of %d from %s for %s/%s (%d device%s) %s", op->replies, replies_expected, host, op->target, op->action, ndevices, pcmk__plural_s(ndevices), id); if (ndevices > 0) { peer = add_result(op, host, ndevices, dev); } pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (pcmk_is_set(op->call_options, st_opt_topology)) { /* If we start the fencing before all the topology results are in, * it is possible fencing levels will be skipped because of the missing * query results. */ if (op->state == st_query && all_topology_devices_found(op)) { /* All the query results are in for the topology, start the fencing ops. */ crm_trace("All topology devices found"); request_peer_fencing(op, peer); } else if (have_all_replies) { crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ", replies_expected, op->replies); request_peer_fencing(op, NULL); } } else if (op->state == st_query) { int nverified = count_peer_devices(op, peer, TRUE, fenced_support_flag(op->action)); /* We have a result for a non-topology fencing op that looks promising, * go ahead and start fencing before query timeout */ if ((peer != NULL) && !host_is_target && nverified) { /* we have a verified device living on a peer that is not the target */ crm_trace("Found %d verified device%s", nverified, pcmk__plural_s(nverified)); request_peer_fencing(op, peer); } else if (have_all_replies) { crm_info("All query replies have arrived, continuing (%d expected/%d received) ", replies_expected, op->replies); request_peer_fencing(op, NULL); } else { crm_trace("Waiting for more peer results before launching fencing operation"); } } else if ((peer != NULL) && (op->state == st_done)) { crm_info("Discarding query result from %s (%d device%s): " "Operation is %s", peer->host, peer->ndevices, pcmk__plural_s(peer->ndevices), stonith_op_state_str(op->state)); } return pcmk_ok; } /*! * \internal * \brief Handle a peer's reply to a fencing request * * Parse a fencing reply from XML, and either finalize the operation * or attempt another device as appropriate. 
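 *
 * As handled below, a reply whose PCMK__XA_SUBT is PCMK__VALUE_BROADCAST
 * finalizes the operation directly; any other reply is processed only when
 * the local node is the operation's originator.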
* * \param[in] msg XML reply received */ void fenced_process_fencing_reply(xmlNode *msg) { const char *id = NULL; const char *device = NULL; remote_fencing_op_t *op = NULL; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_REMOTE_OP "]", LOG_ERR); pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(dev != NULL, return); id = pcmk__xe_get(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return); dev = stonith__find_xe_with_result(msg); CRM_CHECK(dev != NULL, return); stonith__xe_get_result(dev, &result); device = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (stonith_remote_op_list) { op = g_hash_table_lookup(stonith_remote_op_list, id); } if ((op == NULL) && pcmk__result_ok(&result)) { /* Record successful fencing operations */ const char *client_id = pcmk__xe_get(dev, PCMK__XA_ST_CLIENTID); op = create_remote_stonith_op(client_id, dev, TRUE); } if (op == NULL) { /* Could be for an event that began before we started */ /* TODO: Record the op for later querying */ crm_info("Received peer result of unknown or expired operation %s", id); pcmk__reset_result(&result); return; } pcmk__reset_result(&op->result); op->result = result; // The operation takes ownership of the result if (op->devices && device && !pcmk__str_eq(op->devices->data, device, pcmk__str_casei)) { crm_err("Received outdated reply for device %s (instead of %s) to " "fence (%s) %s. Operation already timed out at peer level.", device, (const char *) op->devices->data, op->action, op->target); return; } if (pcmk__str_eq(pcmk__xe_get(msg, PCMK__XA_SUBT), PCMK__VALUE_BROADCAST, pcmk__str_none)) { if (pcmk__result_ok(&op->result)) { op->state = st_done; } else { op->state = st_failed; } finalize_op(op, msg, false); return; } else if (!pcmk__str_eq(op->originator, fenced_get_local_node(), pcmk__str_casei)) { /* If this isn't a remote level broadcast, and we are not the * originator of the operation, we should not be receiving this msg. */ crm_err("Received non-broadcast fencing result for operation %.8s " "we do not own (device %s targeting %s)", op->id, device, op->target); return; } if (pcmk_is_set(op->call_options, st_opt_topology)) { const char *device = NULL; const char *reason = op->result.exit_reason; /* We own the op, and it is complete. broadcast the result to all nodes * and notify our local clients. */ if (op->state == st_done) { finalize_op(op, msg, false); return; } device = pcmk__xe_get(msg, PCMK__XA_ST_DEVICE_ID); if ((op->phase == 2) && !pcmk__result_ok(&op->result)) { /* A remapped "on" failed, but the node was already turned off * successfully, so ignore the error and continue. */ crm_warn("Ignoring %s 'on' failure (%s%s%s) targeting %s " "after successful 'off'", device, pcmk_exec_status_str(op->result.execution_status), (reason == NULL)? "" : ": ", (reason == NULL)? "" : reason, op->target); pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { crm_notice("Action '%s' targeting %s%s%s on behalf of %s@%s: " "%s%s%s%s", op->action, op->target, ((device == NULL)? "" : " using "), ((device == NULL)? "" : device), op->client_name, op->originator, pcmk_exec_status_str(op->result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); } if (pcmk__result_ok(&op->result)) { /* An operation completed successfully. Try another device if * necessary, otherwise mark the operation as done. */ advance_topology_device_in_level(op, device, msg); return; } else { /* This device failed, time to try another topology level. 
If no other * levels are available, mark this operation as failed and report results. */ if (advance_topology_level(op, false) != pcmk_rc_ok) { op->state = st_failed; finalize_op(op, msg, false); return; } } } else if (pcmk__result_ok(&op->result) && (op->devices == NULL)) { op->state = st_done; finalize_op(op, msg, false); return; } else if ((op->result.execution_status == PCMK_EXEC_TIMEOUT) && (op->devices == NULL)) { /* If the operation timed out don't bother retrying other peers. */ op->state = st_failed; finalize_op(op, msg, false); return; } else { /* fall-through and attempt other fencing action using another peer */ } /* Retry on failure */ crm_trace("Next for %s on behalf of %s@%s (result was: %s)", op->target, op->originator, op->client_name, pcmk_exec_status_str(op->result.execution_status)); request_peer_fencing(op, NULL); } gboolean stonith_check_fence_tolerance(int tolerance, const char *target, const char *action) { GHashTableIter iter; time_t now = time(NULL); remote_fencing_op_t *rop = NULL; if (tolerance <= 0 || !stonith_remote_op_list || target == NULL || action == NULL) { return FALSE; } g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) { if (strcmp(rop->target, target) != 0) { continue; } else if (rop->state != st_done) { continue; /* We don't have to worry about remapped reboots here * because if state is done, any remapping has been undone */ } else if (strcmp(rop->action, action) != 0) { continue; } else if ((rop->completed + tolerance) < now) { continue; } crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s", target, action, tolerance, rop->delegate, rop->originator); return TRUE; } return FALSE; } diff --git a/daemons/pacemakerd/pcmkd_messages.c b/daemons/pacemakerd/pcmkd_messages.c index fbf15e4fc6..c9d9169ab6 100644 --- a/daemons/pacemakerd/pcmkd_messages.c +++ b/daemons/pacemakerd/pcmkd_messages.c @@ -1,281 +1,281 @@ /* * Copyright 2010-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include "pacemakerd.h" #include #include #include #include #include #include #include #include static GHashTable *pcmkd_handlers = NULL; static xmlNode * handle_node_cache_request(pcmk__request_t *request) { crm_trace("Ignoring request from client %s to purge node " "because peer cache is not used", pcmk__client_name(request->ipc_client)); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_OK); return NULL; } static xmlNode * handle_ping_request(pcmk__request_t *request) { xmlNode *msg = request->xml; const char *value = NULL; xmlNode *ping = NULL; xmlNode *reply = NULL; const char *from = pcmk__xe_get(msg, PCMK__XA_CRM_SYS_FROM); /* Pinged for status */ crm_trace("Pinged from " PCMK__XA_CRM_SYS_FROM "='%s' " PCMK_XA_ORIGIN "='%s'", pcmk__s(from, ""), pcmk__s(pcmk__xe_get(msg, PCMK_XA_ORIGIN), "")); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); ping = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE); value = pcmk__xe_get(msg, PCMK__XA_CRM_SYS_TO); crm_xml_add(ping, PCMK__XA_CRM_SUBSYSTEM, value); crm_xml_add(ping, PCMK__XA_PACEMAKERD_STATE, pacemakerd_state); crm_xml_add_ll(ping, PCMK_XA_CRM_TIMESTAMP, (long long) subdaemon_check_progress); crm_xml_add(ping, PCMK_XA_RESULT, "ok"); reply = pcmk__new_reply(msg, ping); pcmk__xml_free(ping); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Failed building ping reply for client %s", pcmk__client_name(request->ipc_client)); } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } /* just proceed state on sbd pinging us */ if (from && strstr(from, "sbd")) { if (pcmk__str_eq(pacemakerd_state, PCMK__VALUE_SHUTDOWN_COMPLETE, pcmk__str_none)) { if (pcmk__get_sbd_sync_resource_startup()) { crm_notice("Shutdown-complete-state passed to SBD."); } shutdown_complete_state_reported_to = request->ipc_client->pid; } else if (pcmk__str_eq(pacemakerd_state, PCMK__VALUE_WAIT_FOR_PING, pcmk__str_none)) { crm_notice("Received startup-trigger from SBD."); pacemakerd_state = PCMK__VALUE_STARTING_DAEMONS; mainloop_set_trigger(startup_trigger); } } return reply; } static xmlNode * handle_shutdown_request(pcmk__request_t *request) { xmlNode *msg = request->xml; xmlNode *shutdown = NULL; xmlNode *reply = NULL; /* Only allow privileged users (i.e. root or hacluster) to shut down * Pacemaker from the command line (or direct IPC), so that other users * are forced to go through the CIB and have ACLs applied. 
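 * (The check below relies on the pcmk__client_privileged flag of the
 * requesting IPC client.)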
*/ bool allowed = pcmk_is_set(request->ipc_client->flags, pcmk__client_privileged); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); shutdown = pcmk__xe_create(NULL, PCMK__XE_SHUTDOWN); if (allowed) { crm_notice("Shutting down in response to IPC request %s from %s", pcmk__xe_get(msg, PCMK_XA_REFERENCE), pcmk__xe_get(msg, PCMK_XA_ORIGIN)); crm_xml_add_int(shutdown, PCMK__XA_OP_STATUS, CRM_EX_OK); } else { crm_warn("Ignoring shutdown request from unprivileged client %s", pcmk__client_name(request->ipc_client)); crm_xml_add_int(shutdown, PCMK__XA_OP_STATUS, CRM_EX_INSUFFICIENT_PRIV); } reply = pcmk__new_reply(msg, shutdown); pcmk__xml_free(shutdown); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Failed building shutdown reply for client %s", pcmk__client_name(request->ipc_client)); } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } if (allowed) { pcmk_shutdown(15); } return reply; } static xmlNode * handle_unknown_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INVALID_PARAM); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", pcmk__client_name(request->ipc_client)); return NULL; } static void pcmkd_register_handlers(void) { pcmk__server_command_t handlers[] = { { CRM_OP_RM_NODE_CACHE, handle_node_cache_request }, { CRM_OP_PING, handle_ping_request }, { CRM_OP_QUIT, handle_shutdown_request }, { NULL, handle_unknown_request }, }; pcmkd_handlers = pcmk__register_handlers(handlers); } static int32_t pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } /* Error code means? */ static int32_t pcmk_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); if (shutdown_complete_state_reported_to == client->pid) { shutdown_complete_state_reported_client_closed = TRUE; if (shutdown_trigger) { mainloop_set_trigger(shutdown_trigger); } } pcmk__free_client(client); return 0; } static void pcmk_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); pcmk_ipc_closed(c); } /* Exit code means? 
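 * (Every path in the dispatch callback below returns 0, so no particular
 * meaning is attached to it here.)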
*/ static int32_t pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; xmlNode *msg = NULL; pcmk__client_t *c = pcmk__find_client(qbc); CRM_CHECK(c != NULL, return 0); if (pcmkd_handlers == NULL) { pcmkd_register_handlers(); } msg = pcmk__client_data2xml(c, data, &id, &flags); if (msg == NULL) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } else { char *log_msg = NULL; const char *reason = NULL; xmlNode *reply = NULL; pcmk__request_t request = { .ipc_client = c, .ipc_id = id, .ipc_flags = flags, .peer = NULL, .xml = msg, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; - request.op = crm_element_value_copy(request.xml, PCMK__XA_CRM_TASK); + request.op = pcmk__xe_get_copy(request.xml, PCMK__XA_CRM_TASK); CRM_CHECK(request.op != NULL, return 0); reply = pcmk__process_request(&request, pcmkd_handlers); if (reply != NULL) { pcmk__ipc_send_xml(c, id, reply, crm_ipc_server_event); pcmk__xml_free(reply); } reason = request.result.exit_reason; log_msg = crm_strdup_printf("Processed %s request from %s %s: %s%s%s%s", request.op, pcmk__request_origin_type(&request), pcmk__request_origin(&request), pcmk_exec_status_str(request.result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); if (!pcmk__result_ok(&request.result)) { crm_warn("%s", log_msg); } else { crm_debug("%s", log_msg); } free(log_msg); pcmk__reset_request(&request); } pcmk__xml_free(msg); return 0; } struct qb_ipcs_service_handlers pacemakerd_ipc_callbacks = { .connection_accept = pcmk_ipc_accept, .connection_created = NULL, .msg_process = pcmk_ipc_dispatch, .connection_closed = pcmk_ipc_closed, .connection_destroyed = pcmk_ipc_destroy }; diff --git a/daemons/schedulerd/schedulerd_messages.c b/daemons/schedulerd/schedulerd_messages.c index 366c299694..24f44a16cd 100644 --- a/daemons/schedulerd/schedulerd_messages.c +++ b/daemons/schedulerd/schedulerd_messages.c @@ -1,334 +1,334 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include "pacemaker-schedulerd.h" static GHashTable *schedulerd_handlers = NULL; static pcmk_scheduler_t * init_scheduler(void) { pcmk_scheduler_t *scheduler = pcmk_new_scheduler(); pcmk__mem_assert(scheduler); scheduler->priv->out = logger_out; return scheduler; } static xmlNode * handle_pecalc_request(pcmk__request_t *request) { static struct series_s { const char *name; const char *param; /* Maximum number of inputs of this kind to save to disk. * If -1, save all; if 0, save none. 
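 * These values are fallbacks, used below only when the corresponding
 * cluster option cannot be read or parsed.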
*/ int wrap; } series[] = { { "pe-error", PCMK_OPT_PE_ERROR_SERIES_MAX, -1 }, { "pe-warn", PCMK_OPT_PE_WARN_SERIES_MAX, 5000 }, { "pe-input", PCMK_OPT_PE_INPUT_SERIES_MAX, 4000 }, }; xmlNode *msg = request->xml; xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL); xmlNode *xml_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); static char *last_digest = NULL; static char *filename = NULL; unsigned int seq = 0U; int series_id = 0; int series_wrap = 0; char *digest = NULL; const char *value = NULL; time_t execution_date = time(NULL); xmlNode *converted = NULL; xmlNode *reply = NULL; bool is_repoke = false; bool process = true; pcmk_scheduler_t *scheduler = init_scheduler(); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); digest = pcmk__digest_xml(xml_data, false); converted = pcmk__xml_copy(NULL, xml_data); if (pcmk__update_configured_schema(&converted, true) != pcmk_rc_ok) { scheduler->priv->graph = pcmk__xe_create(NULL, PCMK__XE_TRANSITION_GRAPH); crm_xml_add_int(scheduler->priv->graph, "transition_id", 0); crm_xml_add_int(scheduler->priv->graph, PCMK_OPT_CLUSTER_DELAY, 0); process = false; free(digest); } else if (pcmk__str_eq(digest, last_digest, pcmk__str_casei)) { is_repoke = true; free(digest); } else { free(last_digest); last_digest = digest; } if (process) { scheduler->input = converted; pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts |pcmk__sched_show_utilization); pcmk__schedule_actions(scheduler); // Don't free converted as part of scheduler scheduler->input = NULL; } // Get appropriate index into series[] array if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_error) || pcmk__config_has_error) { series_id = 0; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_warning) || pcmk__config_has_warning) { series_id = 1; } else { series_id = 2; } value = pcmk__cluster_option(scheduler->priv->options, series[series_id].param); if ((value == NULL) || (pcmk__scan_min_int(value, &series_wrap, -1) != pcmk_rc_ok)) { series_wrap = series[series_id].wrap; } if (pcmk__read_series_sequence(PCMK_SCHEDULER_INPUT_DIR, series[series_id].name, &seq) != pcmk_rc_ok) { // @TODO maybe handle errors better ... 
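        // Fall back to restarting the series at sequence 0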
seq = 0U; } crm_trace("Series %s: wrap=%d, seq=%u, pref=%s", series[series_id].name, series_wrap, seq, value); reply = pcmk__new_reply(msg, scheduler->priv->graph); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Failed building ping reply for client %s", pcmk__client_name(request->ipc_client)); goto done; } if (series_wrap == 0) { // Don't save any inputs of this kind free(filename); filename = NULL; } else if (!is_repoke) { // Input changed, save to disk free(filename); filename = pcmk__series_filename(PCMK_SCHEDULER_INPUT_DIR, series[series_id].name, seq, true); } crm_xml_add(reply, PCMK__XA_CRM_TGRAPH_IN, filename); pcmk__log_transition_summary(scheduler, filename); if (series_wrap == 0) { crm_debug("Not saving input to disk (disabled by configuration)"); } else if (is_repoke) { crm_info("Input has not changed since last time, not saving to disk"); } else { unlink(filename); crm_xml_add_ll(xml_data, PCMK_XA_EXECUTION_DATE, (long long) execution_date); pcmk__xml_write_file(xml_data, filename, true); pcmk__write_series_sequence(PCMK_SCHEDULER_INPUT_DIR, series[series_id].name, ++seq, series_wrap); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); done: pcmk__xml_free(converted); pcmk_free_scheduler(scheduler); return reply; } static xmlNode * handle_unknown_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INVALID_PARAM); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", pcmk__client_name(request->ipc_client)); return NULL; } static xmlNode * handle_hello_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); crm_trace("Received IPC hello from %s", pcmk__client_name(request->ipc_client)); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } static void schedulerd_register_handlers(void) { pcmk__server_command_t handlers[] = { { CRM_OP_HELLO, handle_hello_request }, { CRM_OP_PECALC, handle_pecalc_request }, { NULL, handle_unknown_request }, }; schedulerd_handlers = pcmk__register_handlers(handlers); } static int32_t pe_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } static int32_t pe_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; xmlNode *msg = NULL; pcmk__client_t *c = pcmk__find_client(qbc); const char *sys_to = NULL; CRM_CHECK(c != NULL, return 0); if (schedulerd_handlers == NULL) { schedulerd_register_handlers(); } msg = pcmk__client_data2xml(c, data, &id, &flags); if (msg == NULL) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } sys_to = pcmk__xe_get(msg, PCMK__XA_CRM_SYS_TO); if (pcmk__str_eq(pcmk__xe_get(msg, PCMK__XA_SUBT), PCMK__VALUE_RESPONSE, pcmk__str_none)) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); crm_info("Ignoring IPC reply from %s", pcmk__client_name(c)); } else if (!pcmk__str_eq(sys_to, CRM_SYSTEM_PENGINE, pcmk__str_none)) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); crm_info("Ignoring invalid IPC message: to '%s' not " CRM_SYSTEM_PENGINE, pcmk__s(sys_to, "")); } else { char *log_msg = NULL; const char *reason = NULL; xmlNode *reply = NULL; 
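        /* Package the message as a request for the registered handlers;
         * request.op below is a newly allocated copy of the PCMK__XA_CRM_TASK
         * attribute (now obtained via pcmk__xe_get_copy()). */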
pcmk__request_t request = { .ipc_client = c, .ipc_id = id, .ipc_flags = flags, .peer = NULL, .xml = msg, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; - request.op = crm_element_value_copy(request.xml, PCMK__XA_CRM_TASK); + request.op = pcmk__xe_get_copy(request.xml, PCMK__XA_CRM_TASK); CRM_CHECK(request.op != NULL, return 0); reply = pcmk__process_request(&request, schedulerd_handlers); if (reply != NULL) { pcmk__ipc_send_xml(c, id, reply, crm_ipc_server_event); pcmk__xml_free(reply); } reason = request.result.exit_reason; log_msg = crm_strdup_printf("Processed %s request from %s %s: %s%s%s%s", request.op, pcmk__request_origin_type(&request), pcmk__request_origin(&request), pcmk_exec_status_str(request.result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); if (!pcmk__result_ok(&request.result)) { crm_warn("%s", log_msg); } else { crm_debug("%s", log_msg); } free(log_msg); pcmk__reset_request(&request); } pcmk__xml_free(msg); return 0; } /* Error code means? */ static int32_t pe_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); pcmk__free_client(client); return 0; } static void pe_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); pe_ipc_closed(c); } struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = pe_ipc_accept, .connection_created = NULL, .msg_process = pe_ipc_dispatch, .connection_closed = pe_ipc_closed, .connection_destroyed = pe_ipc_destroy }; diff --git a/include/crm/common/xml_element_internal.h b/include/crm/common/xml_element_internal.h index 28dfe457a6..d5a31b1a10 100644 --- a/include/crm/common/xml_element_internal.h +++ b/include/crm/common/xml_element_internal.h @@ -1,176 +1,196 @@ /* * Copyright 2017-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_XML_ELEMENT_INTERNAL__H #define PCMK__CRM_COMMON_XML_ELEMENT_INTERNAL__H /* * Internal-only wrappers for and extensions to libxml2 for processing XML * elements */ #include // bool #include // uint32_t #include // NULL #include // strcmp() #include // guint #include // xmlNode, etc. #include // crm_time_t +#include // pcmk__str_copy() #include // PCMK_XA_ID #ifdef __cplusplus extern "C" { #endif const char *pcmk__xe_add_last_written(xmlNode *xe); xmlNode *pcmk__xe_first_child(const xmlNode *parent, const char *node_name, const char *attr_n, const char *attr_v); void pcmk__xe_remove_attr(xmlNode *element, const char *name); bool pcmk__xe_remove_attr_cb(xmlNode *xml, void *user_data); void pcmk__xe_remove_matching_attrs(xmlNode *element, bool force, bool (*match)(xmlAttrPtr, void *), void *user_data); int pcmk__xe_delete_match(xmlNode *xml, xmlNode *search); int pcmk__xe_replace_match(xmlNode *xml, xmlNode *replace); int pcmk__xe_update_match(xmlNode *xml, xmlNode *update, uint32_t flags); /*! 
* \internal * \brief Check whether an XML element is of a particular type * * \param[in] xml XML element to compare * \param[in] name XML element name to compare * * \return \c true if \p xml is of type \p name, otherwise \c false */ static inline bool pcmk__xe_is(const xmlNode *xml, const char *name) { return (xml != NULL) && (xml->name != NULL) && (name != NULL) && (strcmp((const char *) xml->name, name) == 0); } xmlNode *pcmk__xe_create(xmlNode *parent, const char *name); xmlNode *pcmk__xe_next(const xmlNode *node, const char *element_name); void pcmk__xe_set_content(xmlNode *node, const char *format, ...) G_GNUC_PRINTF(2, 3); int pcmk__xe_get_score(const xmlNode *xml, const char *name, int *score, int default_score); int pcmk__xe_copy_attrs(xmlNode *target, const xmlNode *src, uint32_t flags); void pcmk__xe_sort_attrs(xmlNode *xml); void pcmk__xe_set_id(xmlNode *xml, const char *format, ...) G_GNUC_PRINTF(2, 3); /*! * \internal * \brief Like pcmk__xe_set_props, but takes a va_list instead of * arguments directly. * * \param[in,out] node XML to add attributes to * \param[in] pairs NULL-terminated list of name/value pairs to add */ void pcmk__xe_set_propv(xmlNodePtr node, va_list pairs); /*! * \internal * \brief Add a NULL-terminated list of name/value pairs to the given * XML node as properties. * * \param[in,out] node XML node to add properties to * \param[in] ... NULL-terminated list of name/value pairs * * \note A NULL name terminates the arguments; a NULL value will be skipped. */ void pcmk__xe_set_props(xmlNodePtr node, ...) G_GNUC_NULL_TERMINATED; /*! * \internal * \brief Get first attribute of an XML element * * \param[in] xe XML element to check * * \return First attribute of \p xe (or NULL if \p xe is NULL or has none) */ static inline xmlAttr * pcmk__xe_first_attr(const xmlNode *xe) { return (xe == NULL)? NULL : xe->properties; } /*! * \internal * \brief Iterate over child elements of \p xml * * This function iterates over the children of \p xml, performing the * callback function \p handler on each node. If the callback returns * a value other than pcmk_rc_ok, the iteration stops and the value is * returned. It is therefore possible that not all children will be * visited. * * \param[in,out] xml The starting XML node. Can be NULL. * \param[in] child_element_name The name that the node must match in order * for \p handler to be run. If NULL, all * child elements will match. * \param[in] handler The callback function. * \param[in,out] userdata User data to pass to the callback function. * Can be NULL. 
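 *
 * \note As a purely hypothetical usage sketch: a handler such as
 *
 *           static int
 *           count_children(xmlNode *child, void *userdata)
 *           {
 *               int *count = userdata;
 *
 *               (*count)++;
 *               return pcmk_rc_ok;  // keep iterating
 *           }
 *
 *       visits every child element when invoked as
 *       pcmk__xe_foreach_child(xml, NULL, count_children, &count), with an
 *       int count initialized to 0.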
 *
 * \return Standard Pacemaker return code
 */
int pcmk__xe_foreach_child(xmlNode *xml, const char *child_element_name,
                           int (*handler)(xmlNode *xml, void *userdata),
                           void *userdata);

const char *pcmk__xe_get(const xmlNode *xml, const char *attr_name);
int pcmk__xe_get_datetime(const xmlNode *xml, const char *attr, crm_time_t **t);
int pcmk__xe_get_flags(const xmlNode *xml, const char *name, uint32_t *dest,
                       uint32_t default_value);
int pcmk__xe_get_guint(const xmlNode *xml, const char *attr, guint *dest);
int pcmk__xe_get_int(const xmlNode *xml, const char *attr, int *dest);
int pcmk__xe_get_ll(const xmlNode *xml, const char *attr, long long *dest);
int pcmk__xe_get_time(const xmlNode *xml, const char *attr, time_t *dest);
int pcmk__xe_get_timeval(const xmlNode *xml, const char *sec_attr,
                         const char *usec_attr, struct timeval *dest);

void pcmk__xe_set_bool_attr(xmlNodePtr node, const char *name, bool value);
int pcmk__xe_get_bool_attr(const xmlNode *node, const char *name, bool *value);
bool pcmk__xe_attr_is_true(const xmlNode *node, const char *name);

+/*!
+ * \internal
+ * \brief Retrieve a copy of the value of an XML attribute
+ *
+ * This is like \c pcmk__xe_get() but allocates new memory for the result.
+ *
+ * \param[in] xml   XML element whose attribute to get
+ * \param[in] attr  Attribute name
+ *
+ * \return Newly allocated copy of the specified attribute's value (or \c NULL
+ *         if the attribute is not set)
+ *
+ * \note The caller is responsible for freeing the result using \c free().
+ */
+static inline char *
+pcmk__xe_get_copy(const xmlNode *xml, const char *attr)
+{
+    return pcmk__str_copy(pcmk__xe_get(xml, attr));
+}
+
/*!
 * \internal
 * \brief Retrieve the value of the \c PCMK_XA_ID XML attribute
 *
 * \param[in] xml  XML element to check
 *
 * \return Value of the \c PCMK_XA_ID attribute (may be \c NULL)
 */
static inline const char *
pcmk__xe_id(const xmlNode *xml)
{
    return pcmk__xe_get(xml, PCMK_XA_ID);
}

#ifdef __cplusplus
}
#endif

#endif // PCMK__CRM_COMMON_XML_ELEMENT_INTERNAL__H
diff --git a/lib/cib/cib_attrs.c b/lib/cib/cib_attrs.c
index 992cf2ea83..2839317b8c 100644
--- a/lib/cib/cib_attrs.c
+++ b/lib/cib/cib_attrs.c
@@ -1,664 +1,664 @@
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include static pcmk__output_t * new_output_object(const char *ty) { int rc = pcmk_rc_ok; pcmk__output_t *out = NULL; const char* argv[] = { "", NULL }; pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_LOG, PCMK__SUPPORTED_FORMAT_TEXT, { NULL, NULL, NULL } }; pcmk__register_formats(NULL, formats); rc = pcmk__output_new(&out, ty, NULL, (char**)argv); if ((rc != pcmk_rc_ok) || (out == NULL)) { crm_err("Can't out due to internal error: %s", pcmk_rc_str(rc)); return NULL; } return out; } static int find_attr(cib_t *cib, const char *section, const char *node_uuid, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *user_name, xmlNode **result) { int rc = pcmk_rc_ok; const char *xpath_base = NULL; GString *xpath = NULL; xmlNode *xml_search = NULL; const char *set_type = NULL; const char *node_type = NULL; if (attr_set_type) { set_type = attr_set_type; } else { set_type = PCMK_XE_INSTANCE_ATTRIBUTES; } if (pcmk__str_eq(section, PCMK_XE_CRM_CONFIG, pcmk__str_casei)) { node_uuid = NULL; set_type = PCMK_XE_CLUSTER_PROPERTY_SET; } else if (pcmk__strcase_any_of(section, PCMK_XE_OP_DEFAULTS, PCMK_XE_RSC_DEFAULTS, NULL)) { node_uuid = NULL; set_type = PCMK_XE_META_ATTRIBUTES; } else if (pcmk__str_eq(section, PCMK_XE_TICKETS, pcmk__str_casei)) { node_uuid = NULL; section = PCMK_XE_STATUS; node_type = PCMK_XE_TICKETS; } else if (node_uuid == NULL) { return EINVAL; } xpath_base = pcmk_cib_xpath_for(section); if (xpath_base == NULL) { crm_warn("%s CIB section not known", section); return ENOMSG; } xpath = g_string_sized_new(1024); g_string_append(xpath, xpath_base); if (pcmk__str_eq(node_type, PCMK_XE_TICKETS, pcmk__str_casei)) { pcmk__g_strcat(xpath, "//", node_type, NULL); } else if (node_uuid) { const char *node_type = PCMK_XE_NODE; if (pcmk__str_eq(section, PCMK_XE_STATUS, pcmk__str_casei)) { node_type = PCMK__XE_NODE_STATE; set_type = PCMK__XE_TRANSIENT_ATTRIBUTES; } pcmk__g_strcat(xpath, "//", node_type, "[@" PCMK_XA_ID "='", node_uuid, "']", NULL); } pcmk__g_strcat(xpath, "//", set_type, NULL); if (set_name) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", set_name, "']", NULL); } g_string_append(xpath, "//nvpair"); if (attr_id && attr_name) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "' " "and @" PCMK_XA_NAME "='", attr_name, "']", NULL); } else if (attr_id) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL); } else if (attr_name) { pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL); } rc = cib_internal_op(cib, PCMK__CIB_REQUEST_QUERY, NULL, (const char *) xpath->str, NULL, &xml_search, cib_sync_call|cib_xpath, user_name); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { crm_trace("Query failed for attribute %s (section=%s, node=%s, set=%s, xpath=%s): %s", attr_name, section, pcmk__s(node_uuid, ""), pcmk__s(set_name, ""), (const char *) xpath->str, pcmk_rc_str(rc)); } else { crm_log_xml_debug(xml_search, "Match"); } g_string_free(xpath, TRUE); *result = xml_search; return rc; } static int handle_multiples(pcmk__output_t *out, xmlNode *search, const char *attr_name) { if ((search != NULL) && (search->children != NULL)) { pcmk__warn_multiple_name_matches(out, search, attr_name); return ENOTUNIQ; } else { return pcmk_rc_ok; } } int cib__update_node_attr(pcmk__output_t *out, cib_t *cib, int call_options, const char *section, const char *node_uuid, const char *set_type, const char 
*set_name, const char *attr_id, const char *attr_name, const char *attr_value, const char *user_name, const char *node_type) { const char *tag = NULL; int rc = pcmk_rc_ok; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_search = NULL; char *local_attr_id = NULL; char *local_set_name = NULL; CRM_CHECK((out != NULL) && (cib != NULL) && (section != NULL) && ((attr_id != NULL) || (attr_name != NULL)) && (attr_value != NULL), return EINVAL); rc = find_attr(cib, section, node_uuid, set_type, set_name, attr_id, attr_name, user_name, &xml_search); if (rc == pcmk_rc_ok) { if (handle_multiples(out, xml_search, attr_name) == ENOTUNIQ) { pcmk__xml_free(xml_search); return ENOTUNIQ; } else { - local_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); + local_attr_id = pcmk__xe_get_copy(xml_search, PCMK_XA_ID); attr_id = local_attr_id; pcmk__xml_free(xml_search); goto do_modify; } } else if (rc != ENXIO) { pcmk__xml_free(xml_search); return rc; /* } else if(attr_id == NULL) { */ /* return EINVAL; */ } else { pcmk__xml_free(xml_search); crm_trace("%s does not exist, create it", attr_name); if (pcmk__str_eq(section, PCMK_XE_TICKETS, pcmk__str_casei)) { node_uuid = NULL; section = PCMK_XE_STATUS; node_type = PCMK_XE_TICKETS; xml_top = pcmk__xe_create(xml_obj, PCMK_XE_STATUS); xml_obj = pcmk__xe_create(xml_top, PCMK_XE_TICKETS); } else if (pcmk__str_eq(section, PCMK_XE_NODES, pcmk__str_casei)) { if (node_uuid == NULL) { return EINVAL; } if (pcmk__str_eq(node_type, PCMK_VALUE_REMOTE, pcmk__str_casei)) { xml_top = pcmk__xe_create(xml_obj, PCMK_XE_NODES); xml_obj = pcmk__xe_create(xml_top, PCMK_XE_NODE); crm_xml_add(xml_obj, PCMK_XA_TYPE, PCMK_VALUE_REMOTE); crm_xml_add(xml_obj, PCMK_XA_ID, node_uuid); crm_xml_add(xml_obj, PCMK_XA_UNAME, node_uuid); } else { tag = PCMK_XE_NODE; } } else if (pcmk__str_eq(section, PCMK_XE_STATUS, pcmk__str_casei)) { tag = PCMK__XE_TRANSIENT_ATTRIBUTES; if (node_uuid == NULL) { return EINVAL; } xml_top = pcmk__xe_create(xml_obj, PCMK__XE_NODE_STATE); crm_xml_add(xml_top, PCMK_XA_ID, node_uuid); xml_obj = xml_top; } else { tag = section; node_uuid = NULL; } if (set_name == NULL) { if (pcmk__str_eq(section, PCMK_XE_CRM_CONFIG, pcmk__str_casei)) { local_set_name = pcmk__str_copy(PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS); } else if (pcmk__str_eq(node_type, PCMK_XE_TICKETS, pcmk__str_casei)) { local_set_name = crm_strdup_printf("%s-%s", section, PCMK_XE_TICKETS); } else if (node_uuid) { local_set_name = crm_strdup_printf("%s-%s", section, node_uuid); if (set_type) { char *tmp_set_name = local_set_name; local_set_name = crm_strdup_printf("%s-%s", tmp_set_name, set_type); free(tmp_set_name); } } else { local_set_name = crm_strdup_printf("%s-options", section); } set_name = local_set_name; } if (attr_id == NULL) { local_attr_id = crm_strdup_printf("%s-%s", set_name, attr_name); pcmk__xml_sanitize_id(local_attr_id); attr_id = local_attr_id; } else if (attr_name == NULL) { attr_name = attr_id; } crm_trace("Creating %s/%s", section, tag); if (tag != NULL) { xml_obj = pcmk__xe_create(xml_obj, tag); crm_xml_add(xml_obj, PCMK_XA_ID, node_uuid); if (xml_top == NULL) { xml_top = xml_obj; } } if ((node_uuid == NULL) && !pcmk__str_eq(node_type, PCMK_XE_TICKETS, pcmk__str_casei)) { if (pcmk__str_eq(section, PCMK_XE_CRM_CONFIG, pcmk__str_casei)) { xml_obj = pcmk__xe_create(xml_obj, PCMK_XE_CLUSTER_PROPERTY_SET); } else { xml_obj = pcmk__xe_create(xml_obj, PCMK_XE_META_ATTRIBUTES); } } else if (set_type) { xml_obj = pcmk__xe_create(xml_obj, set_type); } else { xml_obj = 
pcmk__xe_create(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES); } crm_xml_add(xml_obj, PCMK_XA_ID, set_name); if (xml_top == NULL) { xml_top = xml_obj; } } do_modify: xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_trace(xml_top, "update_attr"); rc = cib_internal_op(cib, PCMK__CIB_REQUEST_MODIFY, NULL, section, xml_top, NULL, call_options, user_name); if (!pcmk_is_set(call_options, cib_sync_call) && (cib->variant != cib_file) && (rc >= 0)) { // For async call, positive rc is the call ID (file always synchronous) rc = pcmk_rc_ok; } else { rc = pcmk_legacy2rc(rc); } if (rc != pcmk_rc_ok) { out->err(out, "Error setting %s=%s (section=%s, set=%s): %s", attr_name, attr_value, section, pcmk__s(set_name, ""), pcmk_rc_str(rc)); crm_log_xml_info(xml_top, "Update"); } free(local_set_name); free(local_attr_id); pcmk__xml_free(xml_top); return rc; } int cib__get_node_attrs(pcmk__output_t *out, cib_t *cib, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *user_name, xmlNode **result) { int rc = pcmk_rc_ok; pcmk__assert(result != NULL); CRM_CHECK(section != NULL, return EINVAL); *result = NULL; rc = find_attr(cib, section, node_uuid, set_type, set_name, attr_id, attr_name, user_name, result); if (rc != pcmk_rc_ok) { crm_trace("Query failed for attribute %s (section=%s node=%s set=%s): %s", pcmk__s(attr_name, "with unspecified name"), section, pcmk__s(set_name, ""), pcmk__s(node_uuid, ""), pcmk_rc_str(rc)); } return rc; } int cib__delete_node_attr(pcmk__output_t *out, cib_t *cib, int options, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *attr_value, const char *user_name) { int rc = pcmk_rc_ok; xmlNode *xml_obj = NULL; xmlNode *xml_search = NULL; char *local_attr_id = NULL; CRM_CHECK(section != NULL, return EINVAL); CRM_CHECK(attr_name != NULL || attr_id != NULL, return EINVAL); if (attr_id == NULL) { rc = find_attr(cib, section, node_uuid, set_type, set_name, attr_id, attr_name, user_name, &xml_search); if (rc != pcmk_rc_ok || handle_multiples(out, xml_search, attr_name) == ENOTUNIQ) { pcmk__xml_free(xml_search); return rc; } else { - local_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); + local_attr_id = pcmk__xe_get_copy(xml_search, PCMK_XA_ID); attr_id = local_attr_id; pcmk__xml_free(xml_search); } } xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, attr_value); rc = cib_internal_op(cib, PCMK__CIB_REQUEST_DELETE, NULL, section, xml_obj, NULL, options, user_name); if (!pcmk_is_set(options, cib_sync_call) && (cib->variant != cib_file) && (rc >= 0)) { // For async call, positive rc is the call ID (file always synchronous) rc = pcmk_rc_ok; } else { rc = pcmk_legacy2rc(rc); } if (rc == pcmk_rc_ok) { out->info(out, "Deleted %s %s: id=%s%s%s%s%s", section, node_uuid ? "attribute" : "option", local_attr_id, set_name ? " set=" : "", set_name ? set_name : "", attr_name ? " name=" : "", attr_name ? 
attr_name : ""); } free(local_attr_id); pcmk__xml_free(xml_obj); return rc; } int find_nvpair_attr_delegate(cib_t *cib, const char *attr, const char *section, const char *node_uuid, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, gboolean to_console, char **value, const char *user_name) { pcmk__output_t *out = NULL; xmlNode *xml_search = NULL; int rc = pcmk_ok; out = new_output_object(to_console ? "text" : "log"); if (out == NULL) { return pcmk_err_generic; } rc = find_attr(cib, section, node_uuid, attr_set_type, set_name, attr_id, attr_name, user_name, &xml_search); if (rc == pcmk_rc_ok) { rc = handle_multiples(out, xml_search, attr_name); if (rc == pcmk_rc_ok) { pcmk__str_update(value, pcmk__xe_get(xml_search, attr)); } } out->finish(out, CRM_EX_OK, true, NULL); pcmk__xml_free(xml_search); pcmk__output_free(out); return pcmk_rc2legacy(rc); } int update_attr_delegate(cib_t *cib, int call_options, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *attr_value, gboolean to_console, const char *user_name, const char *node_type) { pcmk__output_t *out = NULL; int rc = pcmk_ok; out = new_output_object(to_console ? "text" : "log"); if (out == NULL) { return pcmk_err_generic; } rc = cib__update_node_attr(out, cib, call_options, section, node_uuid, set_type, set_name, attr_id, attr_name, attr_value, user_name, node_type); out->finish(out, CRM_EX_OK, true, NULL); pcmk__output_free(out); return pcmk_rc2legacy(rc); } int read_attr_delegate(cib_t *cib, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, char **attr_value, gboolean to_console, const char *user_name) { pcmk__output_t *out = NULL; xmlNode *result = NULL; int rc = pcmk_ok; out = new_output_object(to_console ? "text" : "log"); if (out == NULL) { return pcmk_err_generic; } rc = cib__get_node_attrs(out, cib, section, node_uuid, set_type, set_name, attr_id, attr_name, user_name, &result); if (rc == pcmk_rc_ok) { if (result->children == NULL) { pcmk__str_update(attr_value, pcmk__xe_get(result, PCMK_XA_VALUE)); } else { rc = ENOTUNIQ; } } out->finish(out, CRM_EX_OK, true, NULL); pcmk__xml_free(result); pcmk__output_free(out); return pcmk_rc2legacy(rc); } int delete_attr_delegate(cib_t *cib, int options, const char *section, const char *node_uuid, const char *set_type, const char *set_name, const char *attr_id, const char *attr_name, const char *attr_value, gboolean to_console, const char *user_name) { pcmk__output_t *out = NULL; int rc = pcmk_ok; out = new_output_object(to_console ? "text" : "log"); if (out == NULL) { return pcmk_err_generic; } rc = cib__delete_node_attr(out, cib, options, section, node_uuid, set_type, set_name, attr_id, attr_name, attr_value, user_name); out->finish(out, CRM_EX_OK, true, NULL); pcmk__output_free(out); return pcmk_rc2legacy(rc); } /*! 
* \internal * \brief Parse node UUID from search result * * \param[in] result XML search result * \param[out] uuid If non-NULL, where to store parsed UUID * \param[out] is_remote If non-NULL, set TRUE if result is remote node * * \return pcmk_ok if UUID was successfully parsed, -ENXIO otherwise */ static int get_uuid_from_result(const xmlNode *result, char **uuid, int *is_remote) { int rc = -ENXIO; const char *parsed_uuid = NULL; int parsed_is_remote = FALSE; if (result == NULL) { return rc; } /* If there are multiple results, the first is sufficient */ if (pcmk__xe_is(result, PCMK__XE_XPATH_QUERY)) { result = pcmk__xe_first_child(result, NULL, NULL, NULL); CRM_CHECK(result != NULL, return rc); } if (pcmk__xe_is(result, PCMK_XE_NODE)) { // Result is PCMK_XE_NODE element from PCMK_XE_NODES section if (pcmk__str_eq(pcmk__xe_get(result, PCMK_XA_TYPE), PCMK_VALUE_REMOTE, pcmk__str_casei)) { parsed_uuid = pcmk__xe_get(result, PCMK_XA_UNAME); parsed_is_remote = TRUE; } else { parsed_uuid = pcmk__xe_id(result); parsed_is_remote = FALSE; } } else if (pcmk__xe_is(result, PCMK_XE_PRIMITIVE)) { /* Result is for ocf:pacemaker:remote resource */ parsed_uuid = pcmk__xe_id(result); parsed_is_remote = TRUE; } else if (pcmk__xe_is(result, PCMK_XE_NVPAIR)) { /* Result is PCMK_META_REMOTE_NODE parameter of for guest * node */ parsed_uuid = pcmk__xe_get(result, PCMK_XA_VALUE); parsed_is_remote = TRUE; } else if (pcmk__xe_is(result, PCMK__XE_NODE_STATE)) { // Result is PCMK__XE_NODE_STATE element from PCMK_XE_STATUS section parsed_uuid = pcmk__xe_get(result, PCMK_XA_UNAME); if (pcmk__xe_attr_is_true(result, PCMK_XA_REMOTE_NODE)) { parsed_is_remote = TRUE; } } if (parsed_uuid) { if (uuid) { *uuid = strdup(parsed_uuid); } if (is_remote) { *is_remote = parsed_is_remote; } rc = pcmk_ok; } return rc; } /* Search string to find a node by name, as: * - cluster or remote node in nodes section * - remote node in resources section * - guest node in resources section * - orphaned remote node or bundle guest node in status section */ #define XPATH_UPPER_TRANS "ABCDEFGHIJKLMNOPQRSTUVWXYZ" #define XPATH_LOWER_TRANS "abcdefghijklmnopqrstuvwxyz" #define XPATH_NODE \ "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_NODES \ "/" PCMK_XE_NODE "[translate(@" PCMK_XA_UNAME ",'" XPATH_UPPER_TRANS "','" XPATH_LOWER_TRANS "') ='%s']" \ "|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \ "/" PCMK_XE_PRIMITIVE \ "[@class='ocf'][@provider='pacemaker'][@type='remote'][translate(@id,'" XPATH_UPPER_TRANS "','" XPATH_LOWER_TRANS "') ='%s']" \ "|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES \ "/" PCMK_XE_PRIMITIVE "/" PCMK_XE_META_ATTRIBUTES "/" PCMK_XE_NVPAIR \ "[@name='" PCMK_META_REMOTE_NODE "'][translate(@value,'" XPATH_UPPER_TRANS "','" XPATH_LOWER_TRANS "') ='%s']" \ "|/" PCMK_XE_CIB "/" PCMK_XE_STATUS "/" PCMK__XE_NODE_STATE \ "[@" PCMK_XA_REMOTE_NODE "='true'][translate(@" PCMK_XA_ID ",'" XPATH_UPPER_TRANS "','" XPATH_LOWER_TRANS "') ='%s']" int query_node_uuid(cib_t * the_cib, const char *uname, char **uuid, int *is_remote_node) { int rc = pcmk_ok; char *xpath_string; xmlNode *xml_search = NULL; char *host_lowercase = NULL; pcmk__assert(uname != NULL); host_lowercase = g_ascii_strdown(uname, -1); if (uuid) { *uuid = NULL; } if (is_remote_node) { *is_remote_node = FALSE; } xpath_string = crm_strdup_printf(XPATH_NODE, host_lowercase, host_lowercase, host_lowercase, host_lowercase); if (cib_internal_op(the_cib, PCMK__CIB_REQUEST_QUERY, NULL, xpath_string, NULL, &xml_search, cib_sync_call|cib_xpath, 
NULL) == pcmk_ok) { rc = get_uuid_from_result(xml_search, uuid, is_remote_node); } else { rc = -ENXIO; } free(xpath_string); pcmk__xml_free(xml_search); g_free(host_lowercase); if (rc != pcmk_ok) { crm_debug("Could not map node name '%s' to a UUID: %s", uname, pcmk_strerror(rc)); } else { crm_info("Mapped node name '%s' to UUID %s", uname, (uuid? *uuid : "")); } return rc; } diff --git a/lib/cib/cib_native.c b/lib/cib/cib_native.c index 2c5708d0ac..3df5e72649 100644 --- a/lib/cib/cib_native.c +++ b/lib/cib/cib_native.c @@ -1,486 +1,485 @@ /* * Copyright 2004 International Business Machines * Later changes copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include typedef struct cib_native_opaque_s { char *token; crm_ipc_t *ipc; void (*dnotify_fn) (gpointer user_data); mainloop_io_t *source; } cib_native_opaque_t; static int cib_native_perform_op_delegate(cib_t *cib, const char *op, const char *host, const char *section, xmlNode *data, xmlNode **output_data, int call_options, const char *user_name) { int rc = pcmk_ok; int reply_id = 0; enum crm_ipc_flags ipc_flags = crm_ipc_flags_none; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; cib_native_opaque_t *native = cib->variant_opaque; if (cib->state == cib_disconnected) { return -ENOTCONN; } if (output_data != NULL) { *output_data = NULL; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } if (call_options & cib_sync_call) { pcmk__set_ipc_flags(ipc_flags, "client", crm_ipc_client_response); } rc = cib__create_op(cib, op, host, section, data, call_options, user_name, NULL, &op_msg); if (rc != pcmk_ok) { return rc; } if (pcmk_is_set(call_options, cib_transaction)) { rc = cib__extend_transaction(cib, op_msg); goto done; } crm_trace("Sending %s message to the CIB manager (timeout=%ds)", op, cib->call_timeout); rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, cib->call_timeout * 1000, &op_reply); if (rc < 0) { crm_err("Couldn't perform %s operation (timeout=%ds): %s (%d)", op, cib->call_timeout, pcmk_strerror(rc), rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & cib_sync_call)) { crm_trace("Async call, returning %d", cib->call_id); CRM_CHECK(cib->call_id != 0, rc = -ENOMSG; goto done); rc = cib->call_id; goto done; } rc = pcmk_ok; pcmk__xe_get_int(op_reply, PCMK__XA_CIB_CALLID, &reply_id); if (reply_id == cib->call_id) { xmlNode *wrapper = pcmk__xe_first_child(op_reply, PCMK__XE_CIB_CALLDATA, NULL, NULL); xmlNode *tmp = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); crm_trace("Synchronous reply %d received", reply_id); if (pcmk__xe_get_int(op_reply, PCMK__XA_CIB_RC, &rc) != pcmk_rc_ok) { rc = -EPROTO; } if (output_data == NULL || (call_options & cib_discard_reply)) { crm_trace("Discarding reply"); } else { *output_data = pcmk__xml_copy(NULL, tmp); } } else if (reply_id <= 0) { crm_err("Received bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); rc = -ENOMSG; goto done; } else { crm_err("Received bad reply: %d (wanted %d)", reply_id, cib->call_id); crm_log_xml_err(op_reply, "Old reply"); rc = -ENOMSG; goto done; } if (op_reply == NULL && cib->state == cib_disconnected) { rc = -ENOTCONN; } else if (rc == pcmk_ok && op_reply == NULL) { rc = -ETIME; } 
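    /* Normalize or log selected return codes: a diff-resync indication is
     * treated as success, while protocol-level problems are logged as errors
     * below. */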
switch (rc) { case pcmk_ok: case -EPERM: break; /* This is an internal value that clients do not and should not care about */ case -pcmk_err_diff_resync: rc = pcmk_ok; break; /* These indicate internal problems */ case -EPROTO: case -ENOMSG: crm_err("Call failed: %s", pcmk_strerror(rc)); if (op_reply) { crm_log_xml_err(op_reply, "Invalid reply"); } break; default: if (!pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none)) { crm_warn("Call failed: %s", pcmk_strerror(rc)); } } done: if (!crm_ipc_connected(native->ipc)) { crm_err("The CIB manager disconnected"); cib->state = cib_disconnected; } pcmk__xml_free(op_msg); pcmk__xml_free(op_reply); return rc; } static int cib_native_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; xmlNode *msg = NULL; cib_t *cib = userdata; crm_trace("dispatching %p", userdata); if (cib == NULL) { crm_err("No CIB!"); return 0; } msg = pcmk__xml_parse(buffer); if (msg == NULL) { crm_warn("Received a NULL message from the CIB manager"); return 0; } /* do callbacks */ type = pcmk__xe_get(msg, PCMK__XA_T); crm_trace("Activating %s callbacks...", type); crm_log_xml_explicit(msg, "cib-reply"); if (pcmk__str_eq(type, PCMK__VALUE_CIB, pcmk__str_none)) { cib_native_callback(cib, msg, 0, 0); } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_NOTIFY, pcmk__str_none)) { g_list_foreach(cib->notify_list, cib_native_notify, msg); } else { crm_err("Unknown message type: %s", type); } pcmk__xml_free(msg); return 0; } static void cib_native_destroy(void *userdata) { cib_t *cib = userdata; cib_native_opaque_t *native = cib->variant_opaque; crm_trace("destroying %p", userdata); cib->state = cib_disconnected; native->source = NULL; native->ipc = NULL; if (native->dnotify_fn) { native->dnotify_fn(userdata); } } static int cib_native_signoff(cib_t *cib) { cib_native_opaque_t *native = cib->variant_opaque; crm_debug("Disconnecting from the CIB manager"); cib_free_notify(cib); remove_cib_op_callback(0, TRUE); if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } cib->cmds->end_transaction(cib, false, cib_none); cib->state = cib_disconnected; cib->type = cib_no_connection; return pcmk_ok; } static int cib_native_signon(cib_t *cib, const char *name, enum cib_conn_type type) { int rc = pcmk_ok; const char *channel = NULL; cib_native_opaque_t *native = cib->variant_opaque; xmlNode *hello = NULL; struct ipc_client_callbacks cib_callbacks = { .dispatch = cib_native_dispatch_internal, .destroy = cib_native_destroy }; if (name == NULL) { name = pcmk__s(crm_system_name, "client"); } cib->call_timeout = PCMK__IPC_TIMEOUT; if (type == cib_command) { cib->state = cib_connected_command; channel = PCMK__SERVER_BASED_RW; } else if (type == cib_command_nonblocking) { cib->state = cib_connected_command; channel = PCMK__SERVER_BASED_SHM; } else if (type == cib_query) { cib->state = cib_connected_query; channel = PCMK__SERVER_BASED_RO; } else { return -ENOTCONN; } crm_trace("Connecting %s channel", channel); native->source = mainloop_add_ipc_client(channel, G_PRIORITY_HIGH, 512 * 1024, cib, &cib_callbacks); native->ipc = mainloop_get_ipc_client(native->source); if (rc != pcmk_ok || native->ipc == NULL || !crm_ipc_connected(native->ipc)) { crm_info("Could not connect to CIB manager for %s", name); rc = -ENOTCONN; } if (rc == 
pcmk_ok) { rc = cib__create_op(cib, CRM_OP_REGISTER, NULL, NULL, NULL, cib_sync_call, NULL, name, &hello); } if (rc == pcmk_ok) { xmlNode *reply = NULL; if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply) > 0) { const char *msg_type = pcmk__xe_get(reply, PCMK__XA_CIB_OP); crm_log_xml_trace(reply, "reg-reply"); if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) { crm_info("Reply to CIB registration message has unknown type " "'%s'", msg_type); rc = -EPROTO; } else { - native->token = crm_element_value_copy(reply, - PCMK__XA_CIB_CLIENTID); + native->token = pcmk__xe_get_copy(reply, PCMK__XA_CIB_CLIENTID); if (native->token == NULL) { rc = -EPROTO; } } pcmk__xml_free(reply); } else { rc = -ECOMM; } pcmk__xml_free(hello); } if (rc == pcmk_ok) { crm_info("Successfully connected to CIB manager for %s", name); return pcmk_ok; } crm_info("Connection to CIB manager for %s failed: %s", name, pcmk_strerror(rc)); cib_native_signoff(cib); return rc; } static int cib_native_free(cib_t *cib) { int rc = pcmk_ok; if (cib->state != cib_disconnected) { rc = cib_native_signoff(cib); } if (cib->state == cib_disconnected) { cib_native_opaque_t *native = cib->variant_opaque; free(native->token); free(cib->variant_opaque); free(cib->cmds); free(cib->user); free(cib); } return rc; } static int cib_native_register_notification(cib_t *cib, const char *callback, int enabled) { int rc = pcmk_ok; xmlNode *notify_msg = pcmk__xe_create(NULL, PCMK__XE_CIB_CALLBACK); cib_native_opaque_t *native = cib->variant_opaque; if (cib->state != cib_disconnected) { crm_xml_add(notify_msg, PCMK__XA_CIB_OP, PCMK__VALUE_CIB_NOTIFY); crm_xml_add(notify_msg, PCMK__XA_CIB_NOTIFY_TYPE, callback); crm_xml_add_int(notify_msg, PCMK__XA_CIB_NOTIFY_ACTIVATE, enabled); rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, 1000 * cib->call_timeout, NULL); if (rc <= 0) { crm_trace("Notification not registered: %d", rc); rc = -ECOMM; } } pcmk__xml_free(notify_msg); return rc; } static int cib_native_set_connection_dnotify(cib_t *cib, void (*dnotify) (gpointer user_data)) { cib_native_opaque_t *native = NULL; if (cib == NULL) { crm_err("No CIB!"); return FALSE; } native = cib->variant_opaque; native->dnotify_fn = dnotify; return pcmk_ok; } /*! * \internal * \brief Get the given CIB connection's unique client identifier * * These can be used to check whether this client requested the action that * triggered a CIB notification. * * \param[in] cib CIB connection * \param[out] async_id If not \p NULL, where to store asynchronous client ID * \param[out] sync_id If not \p NULL, where to store synchronous client ID * * \return Legacy Pacemaker return code (specifically, \p pcmk_ok) * * \note This is the \p cib_native variant implementation of * \p cib_api_operations_t:client_id(). * \note For \p cib_native objects, \p async_id and \p sync_id are the same. * \note The client ID is assigned during CIB sign-on. 
*/ static int cib_native_client_id(const cib_t *cib, const char **async_id, const char **sync_id) { cib_native_opaque_t *native = cib->variant_opaque; if (async_id != NULL) { *async_id = native->token; } if (sync_id != NULL) { *sync_id = native->token; } return pcmk_ok; } cib_t * cib_native_new(void) { cib_native_opaque_t *native = NULL; cib_t *cib = cib_new_variant(); if (cib == NULL) { return NULL; } native = calloc(1, sizeof(cib_native_opaque_t)); if (native == NULL) { free(cib); return NULL; } cib->variant = cib_native; cib->variant_opaque = native; native->ipc = NULL; native->source = NULL; native->dnotify_fn = NULL; /* assign variant specific ops */ cib->delegate_fn = cib_native_perform_op_delegate; cib->cmds->signon = cib_native_signon; cib->cmds->signoff = cib_native_signoff; cib->cmds->free = cib_native_free; cib->cmds->register_notification = cib_native_register_notification; cib->cmds->set_connection_dnotify = cib_native_set_connection_dnotify; cib->cmds->client_id = cib_native_client_id; return cib; } diff --git a/lib/cib/cib_ops.c b/lib/cib/cib_ops.c index 7b42eaf1c0..705c3e0980 100644 --- a/lib/cib/cib_ops.c +++ b/lib/cib/cib_ops.c @@ -1,832 +1,832 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include // xmlXPathObject, etc. #include #include #include #include // @TODO: Free this via crm_exit() when libcib gets merged with libcrmcommon static GHashTable *operation_table = NULL; static const cib__operation_t cib_ops[] = { { PCMK__CIB_REQUEST_ABS_DELETE, cib__op_abs_delete, cib__op_attr_modifies|cib__op_attr_privileged }, { PCMK__CIB_REQUEST_APPLY_PATCH, cib__op_apply_patch, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_BUMP, cib__op_bump, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_COMMIT_TRANSACT, cib__op_commit_transact, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_replaces |cib__op_attr_writes_through }, { PCMK__CIB_REQUEST_CREATE, cib__op_create, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_DELETE, cib__op_delete, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_ERASE, cib__op_erase, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_replaces |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_IS_PRIMARY, cib__op_is_primary, cib__op_attr_privileged }, { PCMK__CIB_REQUEST_MODIFY, cib__op_modify, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_NOOP, cib__op_noop, cib__op_attr_none }, { CRM_OP_PING, cib__op_ping, cib__op_attr_none }, { // @COMPAT: Drop cib__op_attr_modifies when we drop legacy mode support PCMK__CIB_REQUEST_PRIMARY, cib__op_primary, cib__op_attr_modifies|cib__op_attr_privileged|cib__op_attr_local }, { PCMK__CIB_REQUEST_QUERY, cib__op_query, cib__op_attr_none }, { PCMK__CIB_REQUEST_REPLACE, cib__op_replace, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_replaces |cib__op_attr_writes_through |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_SECONDARY, cib__op_secondary, cib__op_attr_privileged|cib__op_attr_local }, { PCMK__CIB_REQUEST_SHUTDOWN, cib__op_shutdown, 
cib__op_attr_privileged }, { PCMK__CIB_REQUEST_SYNC_TO_ALL, cib__op_sync_all, cib__op_attr_privileged }, { PCMK__CIB_REQUEST_SYNC_TO_ONE, cib__op_sync_one, cib__op_attr_privileged }, { PCMK__CIB_REQUEST_UPGRADE, cib__op_upgrade, cib__op_attr_modifies |cib__op_attr_privileged |cib__op_attr_writes_through |cib__op_attr_transaction }, { PCMK__CIB_REQUEST_SCHEMAS, cib__op_schemas, cib__op_attr_local } }; /*! * \internal * \brief Get the \c cib__operation_t object for a given CIB operation name * * \param[in] op CIB operation name * \param[out] operation Where to store CIB operation object * * \return Standard Pacemaker return code */ int cib__get_operation(const char *op, const cib__operation_t **operation) { pcmk__assert((op != NULL) && (operation != NULL)); if (operation_table == NULL) { operation_table = pcmk__strkey_table(NULL, NULL); for (int lpc = 0; lpc < PCMK__NELEM(cib_ops); lpc++) { const cib__operation_t *oper = &(cib_ops[lpc]); g_hash_table_insert(operation_table, (gpointer) oper->name, (gpointer) oper); } } *operation = g_hash_table_lookup(operation_table, op); if (*operation == NULL) { crm_err("Operation %s is invalid", op); return EINVAL; } return pcmk_rc_ok; } int cib_process_query(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *obj_root = NULL; int result = pcmk_ok; crm_trace("Processing %s for %s section", op, pcmk__s(section, "unspecified")); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } CRM_CHECK(*answer == NULL, pcmk__xml_free(*answer)); *answer = NULL; if (pcmk__str_eq(PCMK__XE_ALL, section, pcmk__str_casei)) { section = NULL; } obj_root = pcmk_find_cib_element(existing_cib, section); if (obj_root == NULL) { result = -ENXIO; } else if (options & cib_no_children) { xmlNode *shallow = pcmk__xe_create(*answer, (const char *) obj_root->name); pcmk__xe_copy_attrs(shallow, obj_root, pcmk__xaf_none); *answer = shallow; } else { *answer = obj_root; } if (result == pcmk_ok && *answer == NULL) { crm_err("Error creating query response"); result = -ENOMSG; } return result; } static int update_counter(xmlNode *xml_obj, const char *field, bool reset) { char *new_value = NULL; char *old_value = NULL; int int_value = -1; if (!reset && pcmk__xe_get(xml_obj, field) != NULL) { - old_value = crm_element_value_copy(xml_obj, field); + old_value = pcmk__xe_get_copy(xml_obj, field); } if (old_value != NULL) { int_value = atoi(old_value); new_value = pcmk__itoa(++int_value); } else { new_value = pcmk__str_copy("1"); } crm_trace("Update %s from %s to %s", field, pcmk__s(old_value, "unset"), new_value); crm_xml_add(xml_obj, field, new_value); free(new_value); free(old_value); return pcmk_ok; } int cib_process_erase(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int result = pcmk_ok; crm_trace("Processing \"%s\" event", op); if (*result_cib != existing_cib) { pcmk__xml_free(*result_cib); } *result_cib = createEmptyCib(0); pcmk__xe_copy_attrs(*result_cib, existing_cib, pcmk__xaf_none); update_counter(*result_cib, PCMK_XA_ADMIN_EPOCH, false); *answer = NULL; return result; } int cib_process_upgrade(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int rc = 0; const char *max_schema = pcmk__xe_get(req, 
PCMK__XA_CIB_SCHEMA_MAX); const char *original_schema = NULL; const char *new_schema = NULL; *answer = NULL; crm_trace("Processing \"%s\" event with max=%s", op, max_schema); original_schema = pcmk__xe_get(existing_cib, PCMK_XA_VALIDATE_WITH); rc = pcmk__update_schema(result_cib, max_schema, true, !pcmk_is_set(options, cib_verbose)); rc = pcmk_rc2legacy(rc); new_schema = pcmk__xe_get(*result_cib, PCMK_XA_VALIDATE_WITH); if (pcmk__cmp_schemas_by_name(new_schema, original_schema) > 0) { update_counter(*result_cib, PCMK_XA_ADMIN_EPOCH, false); update_counter(*result_cib, PCMK_XA_EPOCH, true); update_counter(*result_cib, PCMK_XA_NUM_UPDATES, true); return pcmk_ok; } return rc; } int cib_process_bump(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int result = pcmk_ok; crm_trace("Processing %s for epoch='%s'", op, pcmk__s(pcmk__xe_get(existing_cib, PCMK_XA_EPOCH), "")); *answer = NULL; update_counter(*result_cib, PCMK_XA_EPOCH, false); return result; } int cib_process_replace(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int result = pcmk_ok; crm_trace("Processing %s for %s section", op, pcmk__s(section, "unspecified")); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } *answer = NULL; if (input == NULL) { return -EINVAL; } if (pcmk__str_eq(PCMK__XE_ALL, section, pcmk__str_casei)) { section = NULL; } else if (pcmk__xe_is(input, section)) { section = NULL; } if (pcmk__xe_is(input, PCMK_XE_CIB)) { int updates = 0; int epoch = 0; int admin_epoch = 0; int replace_updates = 0; int replace_epoch = 0; int replace_admin_epoch = 0; const char *reason = NULL; const char *peer = pcmk__xe_get(req, PCMK__XA_SRC); const char *digest = pcmk__xe_get(req, PCMK__XA_DIGEST); if (digest) { char *digest_verify = pcmk__digest_xml(input, true); if (!pcmk__str_eq(digest_verify, digest, pcmk__str_casei)) { crm_err("Digest mis-match on replace from %s: %s vs. 
%s (expected)", peer, digest_verify, digest); reason = "digest mismatch"; } else { crm_info("Digest matched on replace from %s: %s", peer, digest); } free(digest_verify); } else { crm_trace("No digest to verify"); } cib_version_details(existing_cib, &admin_epoch, &epoch, &updates); cib_version_details(input, &replace_admin_epoch, &replace_epoch, &replace_updates); if (replace_admin_epoch < admin_epoch) { reason = PCMK_XA_ADMIN_EPOCH; } else if (replace_admin_epoch > admin_epoch) { /* no more checks */ } else if (replace_epoch < epoch) { reason = PCMK_XA_EPOCH; } else if (replace_epoch > epoch) { /* no more checks */ } else if (replace_updates < updates) { reason = PCMK_XA_NUM_UPDATES; } if (reason != NULL) { crm_info("Replacement %d.%d.%d from %s not applied to %d.%d.%d:" " current %s is greater than the replacement", replace_admin_epoch, replace_epoch, replace_updates, peer, admin_epoch, epoch, updates, reason); result = -pcmk_err_old_data; } else { crm_info("Replaced %d.%d.%d with %d.%d.%d from %s", admin_epoch, epoch, updates, replace_admin_epoch, replace_epoch, replace_updates, peer); } if (*result_cib != existing_cib) { pcmk__xml_free(*result_cib); } *result_cib = pcmk__xml_copy(NULL, input); } else { xmlNode *obj_root = NULL; obj_root = pcmk_find_cib_element(*result_cib, section); result = pcmk__xe_replace_match(obj_root, input); result = pcmk_rc2legacy(result); if (result != pcmk_ok) { crm_trace("No matching object to replace"); } } return result; } static int delete_child(xmlNode *child, void *userdata) { xmlNode *obj_root = userdata; if (pcmk__xe_delete_match(obj_root, child) != pcmk_rc_ok) { crm_trace("No matching object to delete: %s=%s", child->name, pcmk__xe_id(child)); } return pcmk_rc_ok; } int cib_process_delete(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *obj_root = NULL; crm_trace("Processing \"%s\" event", op); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } if (input == NULL) { crm_err("Cannot perform modification with no data"); return -EINVAL; } obj_root = pcmk_find_cib_element(*result_cib, section); if (pcmk__xe_is(input, section)) { pcmk__xe_foreach_child(input, NULL, delete_child, obj_root); } else { delete_child(input, obj_root); } return pcmk_ok; } int cib_process_modify(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *obj_root = NULL; uint32_t flags = pcmk__xaf_none; crm_trace("Processing \"%s\" event", op); if (options & cib_xpath) { return cib_process_xpath(op, options, section, req, input, existing_cib, result_cib, answer); } if (input == NULL) { crm_err("Cannot perform modification with no data"); return -EINVAL; } obj_root = pcmk_find_cib_element(*result_cib, section); if (obj_root == NULL) { xmlNode *tmp_section = NULL; const char *path = pcmk_cib_parent_name_for(section); if (path == NULL) { return -EINVAL; } tmp_section = pcmk__xe_create(NULL, section); cib_process_xpath(PCMK__CIB_REQUEST_CREATE, 0, path, NULL, tmp_section, NULL, result_cib, answer); pcmk__xml_free(tmp_section); obj_root = pcmk_find_cib_element(*result_cib, section); } CRM_CHECK(obj_root != NULL, return -EINVAL); if (pcmk_is_set(options, cib_score_update)) { flags |= pcmk__xaf_score_update; } if (pcmk__xe_update_match(obj_root, input, flags) != pcmk_rc_ok) { if (options & cib_can_create) { 
pcmk__xml_copy(obj_root, input); } else { return -ENXIO; } } return pcmk_ok; } static int add_cib_object(xmlNode * parent, xmlNode * new_obj) { const char *object_name = NULL; const char *object_id = NULL; if ((parent == NULL) || (new_obj == NULL)) { return -EINVAL; } object_name = (const char *) new_obj->name; if (object_name == NULL) { return -EINVAL; } object_id = pcmk__xe_id(new_obj); if (pcmk__xe_first_child(parent, object_name, ((object_id != NULL)? PCMK_XA_ID : NULL), object_id)) { return -EEXIST; } if (object_id != NULL) { crm_trace("Processing creation of <%s " PCMK_XA_ID "='%s'>", object_name, object_id); } else { crm_trace("Processing creation of <%s>", object_name); } /* @COMPAT PCMK__XA_REPLACE is deprecated since 2.1.6. Due to a legacy use * case, PCMK__XA_REPLACE has special meaning and should not be included in * the newly created object until we can break behavioral backward * compatibility. * * At a compatibility break, drop this and drop the definition of * PCMK__XA_REPLACE. Treat it like any other attribute. */ pcmk__xml_tree_foreach(new_obj, pcmk__xe_remove_attr_cb, (void *) PCMK__XA_REPLACE); pcmk__xml_copy(parent, new_obj); return pcmk_ok; } static bool update_results(xmlNode *failed, xmlNode *target, const char *operation, int return_code) { xmlNode *xml_node = NULL; bool was_error = false; const char *error_msg = NULL; if (return_code != pcmk_ok) { error_msg = pcmk_strerror(return_code); was_error = true; xml_node = pcmk__xe_create(failed, PCMK__XE_FAILED_UPDATE); pcmk__xml_copy(xml_node, target); crm_xml_add(xml_node, PCMK_XA_ID, pcmk__xe_id(target)); crm_xml_add(xml_node, PCMK_XA_OBJECT_TYPE, (const char *) target->name); crm_xml_add(xml_node, PCMK_XA_OPERATION, operation); crm_xml_add(xml_node, PCMK_XA_REASON, error_msg); crm_warn("Action %s failed: %s (cde=%d)", operation, error_msg, return_code); } return was_error; } int cib_process_create(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { xmlNode *failed = NULL; int result = pcmk_ok; xmlNode *update_section = NULL; crm_trace("Processing %s for %s section", op, pcmk__s(section, "unspecified")); if (pcmk__str_eq(PCMK__XE_ALL, section, pcmk__str_casei)) { section = NULL; } else if (pcmk__str_eq(section, PCMK_XE_CIB, pcmk__str_casei)) { section = NULL; } else if (pcmk__xe_is(input, PCMK_XE_CIB)) { section = NULL; } CRM_CHECK(strcmp(op, PCMK__CIB_REQUEST_CREATE) == 0, return -EINVAL); if (input == NULL) { crm_err("Cannot perform modification with no data"); return -EINVAL; } if (section == NULL) { return cib_process_modify(op, options, section, req, input, existing_cib, result_cib, answer); } // @COMPAT Deprecated since 2.1.8 failed = pcmk__xe_create(NULL, PCMK__XE_FAILED); update_section = pcmk_find_cib_element(*result_cib, section); if (pcmk__xe_is(input, section)) { xmlNode *a_child = NULL; for (a_child = pcmk__xml_first_child(input); a_child != NULL; a_child = pcmk__xml_next(a_child)) { result = add_cib_object(update_section, a_child); if (update_results(failed, a_child, op, result)) { break; } } } else { result = add_cib_object(update_section, input); update_results(failed, input, op, result); } if ((result == pcmk_ok) && (failed->children != NULL)) { result = -EINVAL; } if (result != pcmk_ok) { crm_log_xml_err(failed, "CIB Update failures"); *answer = failed; } else { pcmk__xml_free(failed); } return result; } int cib_process_diff(const char *op, int options, const char *section, xmlNode * req, xmlNode * 
input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { const char *originator = NULL; if (req != NULL) { originator = pcmk__xe_get(req, PCMK__XA_SRC); } crm_trace("Processing \"%s\" event from %s%s", op, originator, (pcmk_is_set(options, cib_force_diff)? " (global update)" : "")); if (*result_cib != existing_cib) { pcmk__xml_free(*result_cib); } *result_cib = pcmk__xml_copy(NULL, existing_cib); return xml_apply_patchset(*result_cib, input, TRUE); } int cib_process_xpath(const char *op, int options, const char *section, const xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { int num_results = 0; int rc = pcmk_ok; bool is_query = pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none); bool delete_multiple = pcmk_is_set(options, cib_multiple) && pcmk__str_eq(op, PCMK__CIB_REQUEST_DELETE, pcmk__str_none); xmlXPathObject *xpathObj = NULL; crm_trace("Processing \"%s\" event", op); if (is_query) { xpathObj = pcmk__xpath_search(existing_cib->doc, section); } else { xpathObj = pcmk__xpath_search((*result_cib)->doc, section); } num_results = pcmk__xpath_num_results(xpathObj); if (num_results == 0) { if (pcmk__str_eq(op, PCMK__CIB_REQUEST_DELETE, pcmk__str_none)) { crm_debug("%s was already removed", section); } else { crm_debug("%s: %s does not exist", op, section); rc = -ENXIO; } goto done; } if (is_query && (num_results > 1)) { *answer = pcmk__xe_create(NULL, PCMK__XE_XPATH_QUERY); } for (int i = 0; i < num_results; i++) { xmlNode *match = NULL; xmlChar *path = NULL; /* If we're deleting multiple nodes, go in reverse document order. * If we go in forward order and the node set contains both a parent and * its descendant, then deleting the parent frees the descendant before * the loop reaches the descendant. This is a use-after-free error. * * @COMPAT cib_multiple is only ever used with delete operations. The * correct order to process multiple nodes for operations other than * query (forward) and delete (reverse) is less clear but likely should * be reverse. If we ever replace the CIB public API with libpacemaker * functions, revisit this. For now, we keep forward order for other * operations to preserve backward compatibility, even though external * callers of other ops with cib_multiple might segfault. * * For more info, see comment in xpath2.c:update_xpath_nodes() in * libxml2. 
*/ if (delete_multiple) { match = pcmk__xpath_result(xpathObj, num_results - 1 - i); } else { match = pcmk__xpath_result(xpathObj, i); } if (match == NULL) { continue; } path = xmlGetNodePath(match); crm_debug("Processing %s op for %s with %s", op, section, path); free(path); if (pcmk__str_eq(op, PCMK__CIB_REQUEST_DELETE, pcmk__str_none)) { if (match == *result_cib) { /* Attempting to delete the whole "/cib" */ crm_warn("Cannot perform %s for %s: The xpath is addressing the whole /cib", op, section); rc = -EINVAL; break; } pcmk__xml_free(match); if ((options & cib_multiple) == 0) { break; } } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_MODIFY, pcmk__str_none)) { uint32_t flags = pcmk__xaf_none; if (pcmk_is_set(options, cib_score_update)) { flags |= pcmk__xaf_score_update; } if (pcmk__xe_update_match(match, input, flags) != pcmk_rc_ok) { rc = -ENXIO; } else if ((options & cib_multiple) == 0) { break; } } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_CREATE, pcmk__str_none)) { pcmk__xml_copy(match, input); break; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none)) { if (options & cib_no_children) { xmlNode *shallow = pcmk__xe_create(*answer, (const char *) match->name); pcmk__xe_copy_attrs(shallow, match, pcmk__xaf_none); if (*answer == NULL) { *answer = shallow; } } else if (options & cib_xpath_address) { char *path = NULL; xmlNode *parent = match; while (parent && parent->type == XML_ELEMENT_NODE) { const char *id = pcmk__xe_get(parent, PCMK_XA_ID); char *new_path = NULL; if (id) { new_path = crm_strdup_printf("/%s[@" PCMK_XA_ID "='%s']" "%s", parent->name, id, pcmk__s(path, "")); } else { new_path = crm_strdup_printf("/%s%s", parent->name, pcmk__s(path, "")); } free(path); path = new_path; parent = parent->parent; } crm_trace("Got: %s", path); if (*answer == NULL) { *answer = pcmk__xe_create(NULL, PCMK__XE_XPATH_QUERY); } parent = pcmk__xe_create(*answer, PCMK__XE_XPATH_QUERY_PATH); crm_xml_add(parent, PCMK_XA_ID, path); free(path); } else if (*answer) { pcmk__xml_copy(*answer, match); } else { *answer = match; } } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_REPLACE, pcmk__str_none)) { xmlNode *parent = match->parent; pcmk__xml_free(match); pcmk__xml_copy(parent, input); if ((options & cib_multiple) == 0) { break; } } } done: xmlXPathFreeObject(xpathObj); return rc; } diff --git a/lib/common/alerts.c b/lib/common/alerts.c index 5d6ea20dc0..78c7ce0c55 100644 --- a/lib/common/alerts.c +++ b/lib/common/alerts.c @@ -1,437 +1,437 @@ /* * Copyright 2015-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include const char *pcmk__alert_keys[PCMK__ALERT_INTERNAL_KEY_MAX] = { [PCMK__alert_key_recipient] = "CRM_alert_recipient", [PCMK__alert_key_node] = "CRM_alert_node", [PCMK__alert_key_nodeid] = "CRM_alert_nodeid", [PCMK__alert_key_rsc] = "CRM_alert_rsc", [PCMK__alert_key_task] = "CRM_alert_task", [PCMK__alert_key_interval] = "CRM_alert_interval", [PCMK__alert_key_desc] = "CRM_alert_desc", [PCMK__alert_key_status] = "CRM_alert_status", [PCMK__alert_key_target_rc] = "CRM_alert_target_rc", [PCMK__alert_key_rc] = "CRM_alert_rc", [PCMK__alert_key_kind] = "CRM_alert_kind", [PCMK__alert_key_version] = "CRM_alert_version", [PCMK__alert_key_node_sequence] = PCMK__ALERT_NODE_SEQUENCE, [PCMK__alert_key_timestamp] = "CRM_alert_timestamp", [PCMK__alert_key_attribute_name] = "CRM_alert_attribute_name", [PCMK__alert_key_attribute_value] = "CRM_alert_attribute_value", [PCMK__alert_key_timestamp_epoch] = "CRM_alert_timestamp_epoch", [PCMK__alert_key_timestamp_usec] = "CRM_alert_timestamp_usec", [PCMK__alert_key_exec_time] = "CRM_alert_exec_time", }; /*! * \brief Create a new alert entry structure * * \param[in] id ID to use * \param[in] path Path to alert agent executable * * \return Pointer to newly allocated alert entry * \note Non-string fields will be filled in with defaults. * It is the caller's responsibility to free the result, * using pcmk__free_alert(). */ pcmk__alert_t * pcmk__alert_new(const char *id, const char *path) { pcmk__alert_t *entry = pcmk__assert_alloc(1, sizeof(pcmk__alert_t)); pcmk__assert((id != NULL) && (path != NULL)); entry->id = pcmk__str_copy(id); entry->path = pcmk__str_copy(path); entry->timeout = PCMK__ALERT_DEFAULT_TIMEOUT_MS; entry->flags = pcmk__alert_default; return entry; } void pcmk__free_alert(pcmk__alert_t *entry) { if (entry) { free(entry->id); free(entry->path); free(entry->tstamp_format); free(entry->recipient); g_strfreev(entry->select_attribute_name); if (entry->envvars) { g_hash_table_destroy(entry->envvars); } free(entry); } } /*! 
* \internal * \brief Duplicate an alert entry * * \param[in] entry Alert entry to duplicate * * \return Duplicate of alert entry */ pcmk__alert_t * pcmk__dup_alert(const pcmk__alert_t *entry) { pcmk__alert_t *new_entry = pcmk__alert_new(entry->id, entry->path); new_entry->timeout = entry->timeout; new_entry->flags = entry->flags; new_entry->envvars = pcmk__str_table_dup(entry->envvars); new_entry->tstamp_format = pcmk__str_copy(entry->tstamp_format); new_entry->recipient = pcmk__str_copy(entry->recipient); if (entry->select_attribute_name) { new_entry->select_attribute_name = g_strdupv(entry->select_attribute_name); } return new_entry; } void pcmk__add_alert_key(GHashTable *table, enum pcmk__alert_keys_e name, const char *value) { pcmk__assert((table != NULL) && (name >= 0) && (name < PCMK__ALERT_INTERNAL_KEY_MAX)); if (value == NULL) { crm_trace("Removing alert key %s", pcmk__alert_keys[name]); g_hash_table_remove(table, pcmk__alert_keys[name]); } else { crm_trace("Inserting alert key %s = '%s'", pcmk__alert_keys[name], value); pcmk__insert_dup(table, pcmk__alert_keys[name], value); } } void pcmk__add_alert_key_int(GHashTable *table, enum pcmk__alert_keys_e name, int value) { pcmk__assert((table != NULL) && (name >= 0) && (name < PCMK__ALERT_INTERNAL_KEY_MAX)); crm_trace("Inserting alert key %s = %d", pcmk__alert_keys[name], value); g_hash_table_insert(table, pcmk__str_copy(pcmk__alert_keys[name]), pcmk__itoa(value)); } #define READABLE_DEFAULT pcmk__readable_interval(PCMK__ALERT_DEFAULT_TIMEOUT_MS) /*! * \internal * \brief Unpack options for an alert or alert recipient from its * meta-attributes in the CIB XML configuration * * \param[in,out] xml Alert or recipient XML * \param[in,out] entry Where to store unpacked values * \param[in,out] max_timeout Max timeout of all alerts and recipients thus far * * \return Standard Pacemaker return code */ static int unpack_alert_options(xmlNode *xml, pcmk__alert_t *entry, guint *max_timeout) { GHashTable *config_hash = pcmk__strkey_table(free, free); crm_time_t *now = crm_time_new(NULL); const char *value = NULL; int rc = pcmk_rc_ok; pcmk_rule_input_t rule_input = { .now = now, }; pcmk_unpack_nvpair_blocks(xml, PCMK_XE_META_ATTRIBUTES, NULL, &rule_input, config_hash, NULL); crm_time_free(now); value = g_hash_table_lookup(config_hash, PCMK_META_ENABLED); if ((value != NULL) && !crm_is_true(value)) { // No need to continue unpacking rc = pcmk_rc_disabled; goto done; } value = g_hash_table_lookup(config_hash, PCMK_META_TIMEOUT); if (value != NULL) { long long timeout_ms = crm_get_msec(value); entry->timeout = (int) QB_MIN(timeout_ms, INT_MAX); if (entry->timeout <= 0) { if (entry->timeout == 0) { crm_trace("Alert %s uses default timeout (%s)", entry->id, READABLE_DEFAULT); } else { pcmk__config_warn("Using default timeout (%s) for alert %s " "because '%s' is not a valid timeout", entry->id, value, READABLE_DEFAULT); } entry->timeout = PCMK__ALERT_DEFAULT_TIMEOUT_MS; } else { crm_trace("Alert %s uses timeout of %s", entry->id, pcmk__readable_interval(entry->timeout)); } if (entry->timeout > *max_timeout) { *max_timeout = entry->timeout; } } value = g_hash_table_lookup(config_hash, PCMK_META_TIMESTAMP_FORMAT); if (value != NULL) { /* hard to do any checks here as merely anything can * can be a valid time-format-string */ entry->tstamp_format = strdup(value); crm_trace("Alert %s uses timestamp format '%s'", entry->id, entry->tstamp_format); } done: g_hash_table_destroy(config_hash); return rc; } /*! 
* \internal * \brief Unpack agent parameters for an alert or alert recipient into an * environment variable list based on its CIB XML configuration * * \param[in] xml Alert or recipient XML * \param[in,out] entry Alert entry to create environment variables for */ static void unpack_alert_parameters(const xmlNode *xml, pcmk__alert_t *entry) { xmlNode *child; if ((xml == NULL) || (entry == NULL)) { return; } child = pcmk__xe_first_child(xml, PCMK_XE_INSTANCE_ATTRIBUTES, NULL, NULL); if (child == NULL) { return; } if (entry->envvars == NULL) { entry->envvars = pcmk__strkey_table(free, free); } for (child = pcmk__xe_first_child(child, PCMK_XE_NVPAIR, NULL, NULL); child != NULL; child = pcmk__xe_next(child, PCMK_XE_NVPAIR)) { const char *name = pcmk__xe_get(child, PCMK_XA_NAME); const char *value = pcmk__xe_get(child, PCMK_XA_VALUE); if (value == NULL) { value = ""; } pcmk__insert_dup(entry->envvars, name, value); crm_trace("Alert %s: added environment variable %s='%s'", entry->id, name, value); } } /*! * \internal * \brief Create filters for an alert or alert recipient based on its * configuration in CIB XML * * \param[in] xml Alert or recipient XML * \param[in,out] entry Alert entry to create filters for */ static void unpack_alert_filter(xmlNode *xml, pcmk__alert_t *entry) { xmlNode *select = pcmk__xe_first_child(xml, PCMK_XE_SELECT, NULL, NULL); xmlNode *event_type = NULL; uint32_t flags = pcmk__alert_none; for (event_type = pcmk__xe_first_child(select, NULL, NULL, NULL); event_type != NULL; event_type = pcmk__xe_next(event_type, NULL)) { if (pcmk__xe_is(event_type, PCMK_XE_SELECT_FENCING)) { flags |= pcmk__alert_fencing; } else if (pcmk__xe_is(event_type, PCMK_XE_SELECT_NODES)) { flags |= pcmk__alert_node; } else if (pcmk__xe_is(event_type, PCMK_XE_SELECT_RESOURCES)) { flags |= pcmk__alert_resource; } else if (pcmk__xe_is(event_type, PCMK_XE_SELECT_ATTRIBUTES)) { xmlNode *attr; const char *attr_name; int nattrs = 0; flags |= pcmk__alert_attribute; for (attr = pcmk__xe_first_child(event_type, PCMK_XE_ATTRIBUTE, NULL, NULL); attr != NULL; attr = pcmk__xe_next(attr, PCMK_XE_ATTRIBUTE)) { attr_name = pcmk__xe_get(attr, PCMK_XA_NAME); if (attr_name) { if (nattrs == 0) { g_strfreev(entry->select_attribute_name); entry->select_attribute_name = NULL; } ++nattrs; entry->select_attribute_name = pcmk__realloc(entry->select_attribute_name, (nattrs + 1) * sizeof(char*)); entry->select_attribute_name[nattrs - 1] = strdup(attr_name); entry->select_attribute_name[nattrs] = NULL; } } } } if (flags != pcmk__alert_none) { entry->flags = flags; crm_debug("Alert %s receives events: attributes:%s%s%s%s", entry->id, (pcmk_is_set(flags, pcmk__alert_attribute)? (entry->select_attribute_name? "some" : "all") : "none"), (pcmk_is_set(flags, pcmk__alert_fencing)? " fencing" : ""), (pcmk_is_set(flags, pcmk__alert_node)? " nodes" : ""), (pcmk_is_set(flags, pcmk__alert_resource)? " resources" : "")); } } /*! * \internal * \brief Unpack an alert or an alert recipient * * \param[in,out] alert Alert or recipient XML * \param[in,out] entry Where to store unpacked values * \param[in,out] max_timeout Max timeout of all alerts and recipients thus far * * \return Standard Pacemaker return code */ static int unpack_alert(xmlNode *alert, pcmk__alert_t *entry, guint *max_timeout) { int rc = pcmk_rc_ok; unpack_alert_parameters(alert, entry); rc = unpack_alert_options(alert, entry, max_timeout); if (rc == pcmk_rc_ok) { unpack_alert_filter(alert, entry); } return rc; } /*! 
* \internal * \brief Unpack a CIB alerts section into a list of alert entries * * \param[in] alerts XML of CIB alerts section * * \return List of unpacked alert entries */ GList * pcmk__unpack_alerts(const xmlNode *alerts) { xmlNode *alert; pcmk__alert_t *entry; guint max_timeout = 0U; GList *alert_list = NULL; for (alert = pcmk__xe_first_child(alerts, PCMK_XE_ALERT, NULL, NULL); alert != NULL; alert = pcmk__xe_next(alert, PCMK_XE_ALERT)) { xmlNode *recipient = NULL; int recipients = 0; const char *alert_id = pcmk__xe_id(alert); const char *alert_path = pcmk__xe_get(alert, PCMK_XA_PATH); // Not possible with schema validation enabled if (alert_id == NULL) { pcmk__config_err("Ignoring invalid alert without " PCMK_XA_ID); continue; } if (alert_path == NULL) { pcmk__config_err("Ignoring invalid alert %s without " PCMK_XA_PATH, alert_id); continue; } entry = pcmk__alert_new(alert_id, alert_path); if (unpack_alert(alert, entry, &max_timeout) != pcmk_rc_ok) { // Don't allow recipients to override if entire alert is disabled crm_debug("Alert %s is disabled", entry->id); pcmk__free_alert(entry); continue; } if (entry->tstamp_format == NULL) { entry->tstamp_format = pcmk__str_copy(PCMK__ALERT_DEFAULT_TSTAMP_FORMAT); } crm_debug("Alert %s: path=%s timeout=%s tstamp-format='%s'", entry->id, entry->path, pcmk__readable_interval(entry->timeout), entry->tstamp_format); for (recipient = pcmk__xe_first_child(alert, PCMK_XE_RECIPIENT, NULL, NULL); recipient != NULL; recipient = pcmk__xe_next(recipient, PCMK_XE_RECIPIENT)) { pcmk__alert_t *recipient_entry = pcmk__dup_alert(entry); recipients++; - recipient_entry->recipient = crm_element_value_copy(recipient, - PCMK_XA_VALUE); + recipient_entry->recipient = pcmk__xe_get_copy(recipient, + PCMK_XA_VALUE); if (unpack_alert(recipient, recipient_entry, &max_timeout) != pcmk_rc_ok) { crm_debug("Alert %s: recipient %s is disabled", entry->id, recipient_entry->id); pcmk__free_alert(recipient_entry); continue; } alert_list = g_list_prepend(alert_list, recipient_entry); crm_debug("Alert %s has recipient %s with value %s and %d envvars", entry->id, pcmk__xe_id(recipient), recipient_entry->recipient, (recipient_entry->envvars? g_hash_table_size(recipient_entry->envvars) : 0)); } if (recipients == 0) { alert_list = g_list_prepend(alert_list, entry); } else { // Recipients were prepended individually above pcmk__free_alert(entry); } } return alert_list; } /*! * \internal * \brief Free an alert list generated by pcmk__unpack_alerts() * * \param[in,out] alert_list Alert list to free */ void pcmk__free_alerts(GList *alert_list) { if (alert_list != NULL) { g_list_free_full(alert_list, (GDestroyNotify) pcmk__free_alert); } } diff --git a/lib/common/digest.c b/lib/common/digest.c index 0c5cc8b4a0..ddefc8ac77 100644 --- a/lib/common/digest.c +++ b/lib/common/digest.c @@ -1,391 +1,391 @@ /* * Copyright 2015-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include // GString, etc. 
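The digest helpers later in this file (dump_xml_for_digest(), calculate_xml_digest_v1() and crm_md5sum()) reduce the "v1" format to: MD5 of the XML string with a leading space and a trailing newline. As a sketch only, here is a minimal GLib-based illustration of that format; the helper name and the literal input are my own, and the string argument merely stands in for pcmk__xml_string() output:

#include <glib.h>
#include <stdio.h>

/* Illustrative sketch, not part of this patch: mirrors the v1 layout built by
 * dump_xml_for_digest() and hashed by crm_md5sum().
 */
static gchar *
v1_digest_of_string(const char *xml_string)
{
    GString *buf = g_string_sized_new(1024);
    gchar *digest = NULL;

    g_string_append_c(buf, ' ');       /* leading space, kept for v1 compatibility */
    g_string_append(buf, xml_string);  /* stands in for pcmk__xml_string() output */
    g_string_append_c(buf, '\n');      /* trailing newline, kept for v1 compatibility */

    digest = g_compute_checksum_for_string(G_CHECKSUM_MD5, buf->str, -1);
    g_string_free(buf, TRUE);
    return digest;                     /* caller frees with g_free() */
}

int
main(void)
{
    gchar *digest = v1_digest_of_string("<cib/>");   /* illustrative input only */

    printf("%s\n", digest);
    g_free(digest);
    return 0;
}

The in-tree crm_md5sum() differs only in that it copies the GLib result into a newly allocated string that callers release with free().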
#include // gnutls_hash_fast(), gnutls_hash_get_len() #include // gnutls_strerror() #include #include #include "crmcommon_private.h" #define BEST_EFFORT_STATUS 0 /* * Pacemaker uses digests (MD5 hashes) of stringified XML to detect changes in * the CIB as a whole, a particular resource's agent parameters, and the device * parameters last used to unfence a particular node. * * "v2" digests hash pcmk__xml_string() directly, while less efficient "v1" * digests do the same with a prefixed space, suffixed newline, and optional * pre-sorting. * * On-disk CIB digests use v1 without sorting. * * Operation digests use v1 with sorting, and are stored in a resource's * operation history in the CIB status section. They come in three flavors: * - a digest of (nearly) all resource parameters and options, used to detect * any resource configuration change; * - a digest of resource parameters marked as nonreloadable, used to decide * whether a reload or full restart is needed after a configuration change; * - and a digest of resource parameters not marked as private, used in * simulations where private parameters have been removed from the input. * * Unfencing digests are set as node attributes, and are used to require * that nodes be unfenced again after a device's configuration changes. */ /*! * \internal * \brief Dump XML in a format used with v1 digests * * \param[in] xml Root of XML to dump * * \return Newly allocated buffer containing dumped XML */ static GString * dump_xml_for_digest(const xmlNode *xml) { GString *buffer = g_string_sized_new(1024); /* for compatibility with the old result which is used for v1 digests */ g_string_append_c(buffer, ' '); pcmk__xml_string(xml, 0, buffer, 0); g_string_append_c(buffer, '\n'); return buffer; } /*! * \internal * \brief Calculate and return v1 digest of XML tree * * \param[in] input Root of XML to digest * * \return Newly allocated string containing digest * * \note Example return value: "c048eae664dba840e1d2060f00299e9d" */ static char * calculate_xml_digest_v1(const xmlNode *input) { GString *buffer = dump_xml_for_digest(input); char *digest = NULL; // buffer->len > 2 for initial space and trailing newline CRM_CHECK(buffer->len > 2, g_string_free(buffer, TRUE); return NULL); digest = crm_md5sum((const char *) buffer->str); crm_log_xml_trace(input, "digest:source"); g_string_free(buffer, TRUE); return digest; } /*! * \internal * \brief Calculate and return the digest of a CIB, suitable for storing on disk * * \param[in] input Root of XML to digest * * \return Newly allocated string containing digest */ char * pcmk__digest_on_disk_cib(const xmlNode *input) { /* Always use the v1 format for on-disk digests. * * Switching to v2 affects even full-restart upgrades, so it would be a * compatibility nightmare. * * We only use this once at startup. All other invocations are in a * separate child process. */ return calculate_xml_digest_v1(input); } /*! * \internal * \brief Calculate and return digest of an operation XML element * * The digest is invariant to changes in the order of XML attributes. * * \param[in] input Root of XML to digest (must have no children) * * \return Newly allocated string containing digest */ char * pcmk__digest_operation(const xmlNode *input) { /* Switching to v2 digests would likely cause restarts during rolling * upgrades. * * @TODO Confirm this. Switch to v2 if safe, or drop this TODO otherwise. 
*/ char *digest = NULL; xmlNode *sorted = NULL; pcmk__assert(input->children == NULL); sorted = pcmk__xe_create(NULL, (const char *) input->name); pcmk__xe_copy_attrs(sorted, input, pcmk__xaf_none); pcmk__xe_sort_attrs(sorted); digest = calculate_xml_digest_v1(sorted); pcmk__xml_free(sorted); return digest; } /*! * \internal * \brief Calculate and return the digest of an XML tree * * \param[in] xml XML tree to digest * \param[in] filter Whether to filter certain XML attributes * * \return Newly allocated string containing digest */ char * pcmk__digest_xml(const xmlNode *xml, bool filter) { /* @TODO Filtering accounts for significant CPU usage. Consider removing if * possible. */ char *digest = NULL; GString *buf = g_string_sized_new(1024); pcmk__xml_string(xml, (filter? pcmk__xml_fmt_filtered : 0), buf, 0); digest = crm_md5sum(buf->str); if (digest == NULL) { goto done; } pcmk__if_tracing( { char *trace_file = crm_strdup_printf("%s/digest-%s", pcmk__get_tmpdir(), digest); crm_trace("Saving %s.%s.%s to %s", pcmk__xe_get(xml, PCMK_XA_ADMIN_EPOCH), pcmk__xe_get(xml, PCMK_XA_EPOCH), pcmk__xe_get(xml, PCMK_XA_NUM_UPDATES), trace_file); save_xml_to_file(xml, "digest input", trace_file); free(trace_file); }, {} ); done: g_string_free(buf, TRUE); return digest; } /*! * \internal * \brief Check whether calculated digest of given XML matches expected digest * * \param[in] input Root of XML tree to digest * \param[in] expected Expected digest in on-disk format * * \return true if digests match, false on mismatch or error */ bool pcmk__verify_digest(const xmlNode *input, const char *expected) { char *calculated = NULL; bool passed; if (input != NULL) { calculated = pcmk__digest_on_disk_cib(input); if (calculated == NULL) { crm_perror(LOG_ERR, "Could not calculate digest for comparison"); return false; } } passed = pcmk__str_eq(expected, calculated, pcmk__str_casei); if (passed) { crm_trace("Digest comparison passed: %s", calculated); } else { crm_err("Digest comparison failed: expected %s, calculated %s", expected, calculated); } free(calculated); return passed; } /*! * \internal * \brief Check whether an XML attribute should be excluded from CIB digests * * \param[in] name XML attribute name * * \return true if XML attribute should be excluded from CIB digest calculation */ bool pcmk__xa_filterable(const char *name) { static const char *filter[] = { PCMK_XA_CRM_DEBUG_ORIGIN, PCMK_XA_CIB_LAST_WRITTEN, PCMK_XA_UPDATE_ORIGIN, PCMK_XA_UPDATE_CLIENT, PCMK_XA_UPDATE_USER, }; for (int i = 0; i < PCMK__NELEM(filter); i++) { if (strcmp(name, filter[i]) == 0) { return true; } } return false; } char * crm_md5sum(const char *buffer) { char *digest = NULL; gchar *raw_digest = NULL; if (buffer == NULL) { return NULL; } raw_digest = g_compute_checksum_for_string(G_CHECKSUM_MD5, buffer, -1); if (raw_digest == NULL) { crm_err("Failed to calculate hash"); return NULL; } digest = pcmk__str_copy(raw_digest); g_free(raw_digest); crm_trace("Digest %s.", digest); return digest; } // Return true if a is an attribute that should be filtered static bool should_filter_for_digest(xmlAttrPtr a, void *user_data) { if (strncmp((const char *) a->name, CRM_META "_", sizeof(CRM_META " ") - 1) == 0) { return true; } return pcmk__str_any_of((const char *) a->name, PCMK_XA_ID, PCMK_XA_CRM_FEATURE_SET, PCMK__XA_OP_DIGEST, PCMK__META_ON_NODE, PCMK__META_ON_NODE_UUID, "pcmk_external_ip", NULL); } /*! 
* \internal * \brief Remove XML attributes not needed for operation digest * * \param[in,out] param_set XML with operation parameters */ void pcmk__filter_op_for_digest(xmlNode *param_set) { char *key = NULL; char *timeout = NULL; guint interval_ms = 0; if (param_set == NULL) { return; } /* Timeout is useful for recurring operation digests, so grab it before * removing meta-attributes */ key = crm_meta_name(PCMK_META_INTERVAL); pcmk__xe_get_guint(param_set, key, &interval_ms); free(key); key = NULL; if (interval_ms != 0) { key = crm_meta_name(PCMK_META_TIMEOUT); - timeout = crm_element_value_copy(param_set, key); + timeout = pcmk__xe_get_copy(param_set, key); } // Remove all CRM_meta_* attributes and certain other attributes pcmk__xe_remove_matching_attrs(param_set, false, should_filter_for_digest, NULL); // Add timeout back for recurring operation digests if (timeout != NULL) { crm_xml_add(param_set, key, timeout); } free(timeout); free(key); } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include #include char * calculate_on_disk_digest(xmlNode *input) { return calculate_xml_digest_v1(input); } char * calculate_operation_digest(xmlNode *input, const char *version) { xmlNode *sorted = sorted_xml(input, NULL, true); char *digest = calculate_xml_digest_v1(sorted); pcmk__xml_free(sorted); return digest; } char * calculate_xml_versioned_digest(xmlNode *input, gboolean sort, gboolean do_filter, const char *version) { if ((version == NULL) || (compare_version("3.0.5", version) > 0)) { xmlNode *sorted = NULL; char *digest = NULL; if (sort) { xmlNode *sorted = sorted_xml(input, NULL, true); input = sorted; } crm_trace("Using v1 digest algorithm for %s", pcmk__s(version, "unknown feature set")); digest = calculate_xml_digest_v1(input); pcmk__xml_free(sorted); return digest; } crm_trace("Using v2 digest algorithm for %s", version); return pcmk__digest_xml(input, do_filter); } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/common/schemas.c b/lib/common/schemas.c index 0cb33fdae2..4fd933683a 100644 --- a/lib/common/schemas.c +++ b/lib/common/schemas.c @@ -1,1602 +1,1602 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include // UCHAR_MAX #include #include #include #include // xmlNode #include // xmlChar #include #include #include #include #include #include #include /* PCMK__XML_LOG_BASE */ #include "crmcommon_private.h" #define SCHEMA_ZERO { .v = { 0, 0 } } #define schema_strdup_printf(prefix, version, suffix) \ crm_strdup_printf(prefix "%u.%u" suffix, (version).v[0], (version).v[1]) typedef struct { xmlRelaxNGPtr rng; xmlRelaxNGValidCtxtPtr valid; xmlRelaxNGParserCtxtPtr parser; } relaxng_ctx_cache_t; static GList *known_schemas = NULL; static bool initialized = false; static bool silent_logging = FALSE; static void G_GNUC_PRINTF(2, 3) xml_log(int priority, const char *fmt, ...) { va_list ap; va_start(ap, fmt); if (silent_logging == FALSE) { /* XXX should not this enable dechunking as well? */ PCMK__XML_LOG_BASE(priority, FALSE, 0, NULL, fmt, ap); } va_end(ap); } static int xml_latest_schema_index(void) { /* This function assumes that pcmk__schema_init() has been called * beforehand, so we have at least two schemas (one real schema and the * "none" schema). 
* * @COMPAT: The "none" schema is deprecated since 2.1.8. * Update this when we drop that schema. */ return g_list_length(known_schemas) - 2; } /*! * \internal * \brief Return the schema entry of the highest-versioned schema * * \return Schema entry of highest-versioned schema */ static GList * get_highest_schema(void) { /* The highest numerically versioned schema is the one before none * * @COMPAT none is deprecated since 2.1.8 */ GList *entry = pcmk__get_schema("none"); pcmk__assert((entry != NULL) && (entry->prev != NULL)); return entry->prev; } /*! * \internal * \brief Return the name of the highest-versioned schema * * \return Name of highest-versioned schema (or NULL on error) */ const char * pcmk__highest_schema_name(void) { GList *entry = get_highest_schema(); return ((pcmk__schema_t *)(entry->data))->name; } /*! * \internal * \brief Find first entry of highest major schema version series * * \return Schema entry of first schema with highest major version */ GList * pcmk__find_x_0_schema(void) { #if defined(PCMK__UNIT_TESTING) /* If we're unit testing, this can't be static because it'll stick * around from one test run to the next. It needs to be cleared out * every time. */ GList *x_0_entry = NULL; #else static GList *x_0_entry = NULL; #endif pcmk__schema_t *highest_schema = NULL; if (x_0_entry != NULL) { return x_0_entry; } x_0_entry = get_highest_schema(); highest_schema = x_0_entry->data; for (GList *iter = x_0_entry->prev; iter != NULL; iter = iter->prev) { pcmk__schema_t *schema = iter->data; /* We've found a schema in an older major version series. Return * the index of the first one in the same major version series as * the highest schema. */ if (schema->version.v[0] < highest_schema->version.v[0]) { x_0_entry = iter->next; break; } /* We're out of list to examine. This probably means there was only * one major version series, so return the first schema entry. */ if (iter->prev == NULL) { x_0_entry = known_schemas->data; break; } } return x_0_entry; } static inline bool version_from_filename(const char *filename, pcmk__schema_version_t *version) { if (pcmk__ends_with(filename, ".rng")) { return sscanf(filename, "pacemaker-%hhu.%hhu.rng", &(version->v[0]), &(version->v[1])) == 2; } else { return sscanf(filename, "pacemaker-%hhu.%hhu", &(version->v[0]), &(version->v[1])) == 2; } } static int schema_filter(const struct dirent *a) { int rc = 0; pcmk__schema_version_t version = SCHEMA_ZERO; if (strstr(a->d_name, "pacemaker-") != a->d_name) { /* crm_trace("%s - wrong prefix", a->d_name); */ } else if (!pcmk__ends_with_ext(a->d_name, ".rng")) { /* crm_trace("%s - wrong suffix", a->d_name); */ } else if (!version_from_filename(a->d_name, &version)) { /* crm_trace("%s - wrong format", a->d_name); */ } else { /* crm_debug("%s - candidate", a->d_name); */ rc = 1; } return rc; } static int schema_cmp(pcmk__schema_version_t a_version, pcmk__schema_version_t b_version) { for (int i = 0; i < 2; ++i) { if (a_version.v[i] < b_version.v[i]) { return -1; } else if (a_version.v[i] > b_version.v[i]) { return 1; } } return 0; } static int schema_cmp_directory(const struct dirent **a, const struct dirent **b) { pcmk__schema_version_t a_version = SCHEMA_ZERO; pcmk__schema_version_t b_version = SCHEMA_ZERO; if (!version_from_filename(a[0]->d_name, &a_version) || !version_from_filename(b[0]->d_name, &b_version)) { // Shouldn't be possible, but makes static analysis happy return 0; } return schema_cmp(a_version, b_version); } /*! 
* \internal * \brief Add given schema + auxiliary data to internal bookkeeping. */ static void add_schema(enum pcmk__schema_validator validator, const pcmk__schema_version_t *version, const char *name, GList *transforms) { pcmk__schema_t *schema = NULL; schema = pcmk__assert_alloc(1, sizeof(pcmk__schema_t)); schema->validator = validator; schema->version.v[0] = version->v[0]; schema->version.v[1] = version->v[1]; schema->transforms = transforms; // schema->schema_index is set after all schemas are loaded and sorted if (version->v[0] || version->v[1]) { schema->name = schema_strdup_printf("pacemaker-", *version, ""); } else { schema->name = pcmk__str_copy(name); } known_schemas = g_list_prepend(known_schemas, schema); } static void wrap_libxslt(bool finalize) { static xsltSecurityPrefsPtr secprefs; int ret = 0; /* security framework preferences */ if (!finalize) { pcmk__assert(secprefs == NULL); secprefs = xsltNewSecurityPrefs(); ret = xsltSetSecurityPrefs(secprefs, XSLT_SECPREF_WRITE_FILE, xsltSecurityForbid) | xsltSetSecurityPrefs(secprefs, XSLT_SECPREF_CREATE_DIRECTORY, xsltSecurityForbid) | xsltSetSecurityPrefs(secprefs, XSLT_SECPREF_READ_NETWORK, xsltSecurityForbid) | xsltSetSecurityPrefs(secprefs, XSLT_SECPREF_WRITE_NETWORK, xsltSecurityForbid); if (ret != 0) { return; } } else { xsltFreeSecurityPrefs(secprefs); secprefs = NULL; } /* cleanup only */ if (finalize) { xsltCleanupGlobals(); } } /*! * \internal * \brief Check whether a directory entry matches the upgrade XSLT pattern * * \param[in] entry Directory entry whose filename to check * * \return 1 if the entry's filename is of the form * upgrade-X.Y-ORDER.xsl with each number in the range 0 to * 255, or 0 otherwise */ static int transform_filter(const struct dirent *entry) { const char *re = NULL; unsigned int major = 0; unsigned int minor = 0; unsigned int order = 0; /* Each number is an unsigned char, which is 1 to 3 digits long. (Pacemaker * requires an 8-bit char via a configure test.) */ re = "upgrade-[[:digit:]]{1,3}\\.[[:digit:]]{1,3}-[[:digit:]]{1,3}\\.xsl"; if (!pcmk__str_eq(entry->d_name, re, pcmk__str_regex)) { return 0; } /* Performance isn't critical here and this is simpler than range-checking * within the regex */ if (sscanf(entry->d_name, "upgrade-%u.%u-%u.xsl", &major, &minor, &order) != 3) { return 0; } if ((major > UCHAR_MAX) || (minor > UCHAR_MAX) || (order > UCHAR_MAX)) { return 0; } return 1; } /*! * \internal * \brief Compare transform files based on the version strings in their names * * This is a crude version comparison that relies on the specific structure of * these filenames. * * \retval -1 if \p entry1 sorts before \p entry2 * \retval 0 if \p entry1 sorts equal to \p entry2 * \retval 1 if \p entry1 sorts after \p entry2 * * \note The GNU \c versionsort() function would be perfect here, but it's not * portable. 
*/ static int compare_transforms(const struct dirent **entry1, const struct dirent **entry2) { unsigned char major1 = 0; unsigned char major2 = 0; unsigned char minor1 = 0; unsigned char minor2 = 0; unsigned char order1 = 0; unsigned char order2 = 0; // If these made it through the filter, they should be of the right format CRM_LOG_ASSERT(sscanf((*entry1)->d_name, "upgrade-%hhu.%hhu-%hhu.xsl", &major1, &minor1, &order1) == 3); CRM_LOG_ASSERT(sscanf((*entry2)->d_name, "upgrade-%hhu.%hhu-%hhu.xsl", &major2, &minor2, &order2) == 3); if (major1 < major2) { return -1; } else if (major1 > major2) { return 1; } if (minor1 < minor2) { return -1; } else if (minor1 > minor2) { return 1; } if (order1 < order2) { return -1; } else if (order1 > order2) { return 1; } return 0; } /*! * \internal * \brief Free a list of XSLT transform struct dirent objects * * \param[in,out] data List to free */ static void free_transform_list(void *data) { g_list_free_full((GList *) data, free); } /*! * \internal * \brief Load names of upgrade XSLT stylesheets from a directory into a table * * Stylesheets must have names of the form "upgrade-X.Y-order.xsl", where: * * X is the schema major version * * Y is the schema minor version * * ORDER is the order in which the stylesheet occurs in the transform pipeline * * \param[in] dir Directory containing XSLT stylesheets * * \return Table with schema version as key and \c GList of associated transform * files (as struct dirent) as value */ static GHashTable * load_transforms_from_dir(const char *dir) { struct dirent **namelist = NULL; GHashTable *transforms = NULL; int num_matches = scandir(dir, &namelist, transform_filter, compare_transforms); if (num_matches < 0) { int rc = errno; crm_warn("Could not load transforms from %s: %s", dir, pcmk_rc_str(rc)); goto done; } transforms = pcmk__strkey_table(free, free_transform_list); for (int i = 0; i < num_matches; i++) { pcmk__schema_version_t version = SCHEMA_ZERO; unsigned char order = 0; // Placeholder only if (sscanf(namelist[i]->d_name, "upgrade-%hhu.%hhu-%hhu.xsl", &(version.v[0]), &(version.v[1]), &order) == 3) { char *version_s = crm_strdup_printf("%hhu.%hhu", version.v[0], version.v[1]); GList *list = g_hash_table_lookup(transforms, version_s); if (list == NULL) { /* Prepend is more efficient. However, there won't be many of * these, and we want them to remain sorted by version. It's not * worth reversing all the lists at the end. * * Avoid calling g_hash_table_insert() if the list already * exists. Otherwise free_transform_list() gets called on it. 
*/ list = g_list_append(list, namelist[i]); g_hash_table_insert(transforms, version_s, list); } else { list = g_list_append(list, namelist[i]); free(version_s); } } else { // Sanity only, should never happen thanks to transform_filter() free(namelist[i]); } } done: free(namelist); return transforms; } void pcmk__load_schemas_from_dir(const char *dir) { int lpc, max; struct dirent **namelist = NULL; GHashTable *transforms = NULL; max = scandir(dir, &namelist, schema_filter, schema_cmp_directory); if (max < 0) { int rc = errno; crm_warn("Could not load schemas from %s: %s", dir, pcmk_rc_str(rc)); goto done; } // Look for any upgrade transforms in the same directory transforms = load_transforms_from_dir(dir); for (lpc = 0; lpc < max; lpc++) { pcmk__schema_version_t version = SCHEMA_ZERO; if (version_from_filename(namelist[lpc]->d_name, &version)) { char *version_s = crm_strdup_printf("%hhu.%hhu", version.v[0], version.v[1]); char *orig_key = NULL; GList *transform_list = NULL; if (transforms != NULL) { // The schema becomes the owner of transform_list g_hash_table_lookup_extended(transforms, version_s, (gpointer *) &orig_key, (gpointer *) &transform_list); g_hash_table_steal(transforms, version_s); } add_schema(pcmk__schema_validator_rng, &version, NULL, transform_list); free(version_s); free(orig_key); } else { // Shouldn't be possible, but makes static analysis happy crm_warn("Skipping schema '%s': could not parse version", namelist[lpc]->d_name); } } for (lpc = 0; lpc < max; lpc++) { free(namelist[lpc]); } done: free(namelist); if (transforms != NULL) { g_hash_table_destroy(transforms); } } static gint schema_sort_GCompareFunc(gconstpointer a, gconstpointer b) { const pcmk__schema_t *schema_a = a; const pcmk__schema_t *schema_b = b; // @COMPAT The "none" schema is deprecated since 2.1.8 if (pcmk__str_eq(schema_a->name, PCMK_VALUE_NONE, pcmk__str_none)) { return 1; } else if (pcmk__str_eq(schema_b->name, PCMK_VALUE_NONE, pcmk__str_none)) { return -1; } else { return schema_cmp(schema_a->version, schema_b->version); } } /*! * \internal * \brief Sort the list of known schemas such that all pacemaker-X.Y are in * version order, then "none" * * This function should be called whenever additional schemas are loaded using * \c pcmk__load_schemas_from_dir(), after the initial sets in * \c pcmk__schema_init(). */ void pcmk__sort_schemas(void) { known_schemas = g_list_sort(known_schemas, schema_sort_GCompareFunc); } /*! * \internal * \brief Load pacemaker schemas into cache * * \note This currently also serves as an entry point for the * generic initialization of the libxslt library. */ void pcmk__schema_init(void) { if (!initialized) { const char *remote_schema_dir = pcmk__remote_schema_dir(); char *base = pcmk__xml_artefact_root(pcmk__xml_artefact_ns_legacy_rng); const pcmk__schema_version_t zero = SCHEMA_ZERO; int schema_index = 0; initialized = true; wrap_libxslt(false); pcmk__load_schemas_from_dir(base); pcmk__load_schemas_from_dir(remote_schema_dir); free(base); // @COMPAT Deprecated since 2.1.8 add_schema(pcmk__schema_validator_none, &zero, PCMK_VALUE_NONE, NULL); /* add_schema() prepends items to the list, so in the simple case, this * just reverses the list. However if there were any remote schemas, * sorting is necessary. 
*/ pcmk__sort_schemas(); // Now set the schema indexes and log the final result for (GList *iter = known_schemas; iter != NULL; iter = iter->next) { pcmk__schema_t *schema = iter->data; crm_debug("Loaded schema %d: %s", schema_index, schema->name); schema->schema_index = schema_index++; } } } static bool validate_with_relaxng(xmlDocPtr doc, xmlRelaxNGValidityErrorFunc error_handler, void *error_handler_context, const char *relaxng_file, relaxng_ctx_cache_t **cached_ctx) { int rc = 0; bool valid = true; relaxng_ctx_cache_t *ctx = NULL; CRM_CHECK(doc != NULL, return false); CRM_CHECK(relaxng_file != NULL, return false); if (cached_ctx && *cached_ctx) { ctx = *cached_ctx; } else { crm_debug("Creating RNG parser context"); ctx = pcmk__assert_alloc(1, sizeof(relaxng_ctx_cache_t)); ctx->parser = xmlRelaxNGNewParserCtxt(relaxng_file); CRM_CHECK(ctx->parser != NULL, goto cleanup); if (error_handler) { xmlRelaxNGSetParserErrors(ctx->parser, (xmlRelaxNGValidityErrorFunc) error_handler, (xmlRelaxNGValidityWarningFunc) error_handler, error_handler_context); } else { xmlRelaxNGSetParserErrors(ctx->parser, (xmlRelaxNGValidityErrorFunc) fprintf, (xmlRelaxNGValidityWarningFunc) fprintf, stderr); } ctx->rng = xmlRelaxNGParse(ctx->parser); CRM_CHECK(ctx->rng != NULL, crm_err("Could not find/parse %s", relaxng_file); goto cleanup); ctx->valid = xmlRelaxNGNewValidCtxt(ctx->rng); CRM_CHECK(ctx->valid != NULL, goto cleanup); if (error_handler) { xmlRelaxNGSetValidErrors(ctx->valid, (xmlRelaxNGValidityErrorFunc) error_handler, (xmlRelaxNGValidityWarningFunc) error_handler, error_handler_context); } else { xmlRelaxNGSetValidErrors(ctx->valid, (xmlRelaxNGValidityErrorFunc) fprintf, (xmlRelaxNGValidityWarningFunc) fprintf, stderr); } } rc = xmlRelaxNGValidateDoc(ctx->valid, doc); if (rc > 0) { valid = false; } else if (rc < 0) { crm_err("Internal libxml error during validation"); } cleanup: if (cached_ctx) { *cached_ctx = ctx; } else { if (ctx->parser != NULL) { xmlRelaxNGFreeParserCtxt(ctx->parser); } if (ctx->valid != NULL) { xmlRelaxNGFreeValidCtxt(ctx->valid); } if (ctx->rng != NULL) { xmlRelaxNGFree(ctx->rng); } free(ctx); } return valid; } static void free_schema(gpointer data) { pcmk__schema_t *schema = data; relaxng_ctx_cache_t *ctx = NULL; switch (schema->validator) { case pcmk__schema_validator_none: // not cached break; case pcmk__schema_validator_rng: // cached ctx = (relaxng_ctx_cache_t *) schema->cache; if (ctx == NULL) { break; } if (ctx->parser != NULL) { xmlRelaxNGFreeParserCtxt(ctx->parser); } if (ctx->valid != NULL) { xmlRelaxNGFreeValidCtxt(ctx->valid); } if (ctx->rng != NULL) { xmlRelaxNGFree(ctx->rng); } free(ctx); schema->cache = NULL; break; } free(schema->name); g_list_free_full(schema->transforms, free); free(schema); } /*! * \internal * \brief Clean up global memory associated with XML schemas */ void pcmk__schema_cleanup(void) { if (known_schemas != NULL) { g_list_free_full(known_schemas, free_schema); known_schemas = NULL; } initialized = false; wrap_libxslt(true); } /*! * \internal * \brief Get schema list entry corresponding to a schema name * * \param[in] name Name of schema to get * * \return Schema list entry corresponding to \p name, or NULL if unknown */ GList * pcmk__get_schema(const char *name) { if (name == NULL) { return NULL; } for (GList *iter = known_schemas; iter != NULL; iter = iter->next) { pcmk__schema_t *schema = iter->data; if (pcmk__str_eq(name, schema->name, pcmk__str_none)) { return iter; } } return NULL; } /*! 
* \internal * \brief Compare two schema version numbers given the schema names * * \param[in] schema1 Name of first schema to compare * \param[in] schema2 Name of second schema to compare * * \return Standard comparison result (negative integer if \p schema1 has the * lower version number, positive integer if \p schema1 has the higher * version number, of 0 if the version numbers are equal) */ int pcmk__cmp_schemas_by_name(const char *schema1_name, const char *schema2_name) { GList *entry1 = pcmk__get_schema(schema1_name); GList *entry2 = pcmk__get_schema(schema2_name); if (entry1 == NULL) { return (entry2 == NULL)? 0 : -1; } else if (entry2 == NULL) { return 1; } else { pcmk__schema_t *schema1 = entry1->data; pcmk__schema_t *schema2 = entry2->data; return schema1->schema_index - schema2->schema_index; } } static bool validate_with(xmlNode *xml, pcmk__schema_t *schema, xmlRelaxNGValidityErrorFunc error_handler, void *error_handler_context) { bool valid = false; char *file = NULL; relaxng_ctx_cache_t **cache = NULL; if (schema == NULL) { return false; } if (schema->validator == pcmk__schema_validator_none) { return true; } file = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_rng, schema->name); crm_trace("Validating with %s (type=%d)", pcmk__s(file, "missing schema"), schema->validator); switch (schema->validator) { case pcmk__schema_validator_rng: cache = (relaxng_ctx_cache_t **) &(schema->cache); valid = validate_with_relaxng(xml->doc, error_handler, error_handler_context, file, cache); break; default: crm_err("Unknown validator type: %d", schema->validator); break; } free(file); return valid; } static bool validate_with_silent(xmlNode *xml, pcmk__schema_t *schema) { bool rc, sl_backup = silent_logging; silent_logging = TRUE; rc = validate_with(xml, schema, (xmlRelaxNGValidityErrorFunc) xml_log, GUINT_TO_POINTER(LOG_ERR)); silent_logging = sl_backup; return rc; } bool pcmk__validate_xml(xmlNode *xml_blob, const char *validation, xmlRelaxNGValidityErrorFunc error_handler, void *error_handler_context) { GList *entry = NULL; pcmk__schema_t *schema = NULL; CRM_CHECK((xml_blob != NULL) && (xml_blob->doc != NULL), return false); if (validation == NULL) { validation = pcmk__xe_get(xml_blob, PCMK_XA_VALIDATE_WITH); } pcmk__warn_if_schema_deprecated(validation); entry = pcmk__get_schema(validation); if (entry == NULL) { pcmk__config_err("Cannot validate CIB with %s " PCMK_XA_VALIDATE_WITH " (manually edit to use a known schema)", ((validation == NULL)? "missing" : "unknown")); return false; } schema = entry->data; return validate_with(xml_blob, schema, error_handler, error_handler_context); } /*! * \internal * \brief Validate XML using its configured schema (and send errors to logs) * * \param[in] xml XML to validate * * \return true if XML validates, otherwise false */ bool pcmk__configured_schema_validates(xmlNode *xml) { return pcmk__validate_xml(xml, NULL, (xmlRelaxNGValidityErrorFunc) xml_log, GUINT_TO_POINTER(LOG_ERR)); } /* With this arrangement, an attempt to identify the message severity as explicitly signalled directly from XSLT is performed in rather a smart way (no reliance on formatting string + arguments being always specified as ["%s", purposeful_string], as it can also be ["%s: %s", some_prefix, purposeful_string] etc. so every argument pertaining %s specifier is investigated), and if such a mark found, the respective level is determined and, when the messages are to go to the native logs, the mark itself gets dropped (by the means of string shift). 
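For example (illustrative values only), a message emitted by XSLT as
("%s: %s", "some-prefix", "WARNING: step skipped") is recognized as
LOG_WARNING, and when the native logs are the sink the leading
"WARNING: " mark is shifted out of the argument before it is printed.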
NOTE: whether the native logging is the right sink is decided per the ctx parameter -- NULL denotes this case, otherwise it carries a pointer to the numeric expression of the desired target logging level (messages with higher level will be suppressed) NOTE: on some architectures, this string shift may not have any effect, but that's an acceptable tradeoff The logging level for not explicitly designated messages (suspicious, likely internal errors or some runaways) is LOG_WARNING. */ static void G_GNUC_PRINTF(2, 3) cib_upgrade_err(void *ctx, const char *fmt, ...) { va_list ap, aq; char *arg_cur; bool found = false; const char *fmt_iter = fmt; uint8_t msg_log_level = LOG_WARNING; /* default for runaway messages */ const unsigned * log_level = (const unsigned *) ctx; enum { escan_seennothing, escan_seenpercent, } scan_state = escan_seennothing; va_start(ap, fmt); va_copy(aq, ap); while (!found && *fmt_iter != '\0') { /* while casing schema borrowed from libqb:qb_vsnprintf_serialize */ switch (*fmt_iter++) { case '%': if (scan_state == escan_seennothing) { scan_state = escan_seenpercent; } else if (scan_state == escan_seenpercent) { scan_state = escan_seennothing; } break; case 's': if (scan_state == escan_seenpercent) { size_t prefix_len = 0; scan_state = escan_seennothing; arg_cur = va_arg(aq, char *); if (pcmk__starts_with(arg_cur, "WARNING: ")) { prefix_len = sizeof("WARNING: ") - 1; msg_log_level = LOG_WARNING; } else if (pcmk__starts_with(arg_cur, "INFO: ")) { prefix_len = sizeof("INFO: ") - 1; msg_log_level = LOG_INFO; } else if (pcmk__starts_with(arg_cur, "DEBUG: ")) { prefix_len = sizeof("DEBUG: ") - 1; msg_log_level = LOG_DEBUG; } else { break; } found = true; if (ctx == NULL) { memmove(arg_cur, arg_cur + prefix_len, strlen(arg_cur + prefix_len) + 1); } } break; case '#': case '-': case ' ': case '+': case '\'': case 'I': case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '*': break; case 'l': case 'z': case 't': case 'j': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'e': case 'E': case 'f': case 'F': case 'g': case 'G': case 'a': case 'A': case 'c': case 'p': if (scan_state == escan_seenpercent) { (void) va_arg(aq, void *); /* skip forward */ scan_state = escan_seennothing; } break; default: scan_state = escan_seennothing; break; } } if (log_level != NULL) { /* intention of the following offset is: cibadmin -V -> start showing INFO labelled messages */ if (*log_level + 4 >= msg_log_level) { vfprintf(stderr, fmt, ap); } } else { PCMK__XML_LOG_BASE(msg_log_level, TRUE, 0, "CIB upgrade: ", fmt, ap); } va_end(aq); va_end(ap); } /*! 
* \internal * \brief Apply a single XSL transformation to given XML * * \param[in] xml XML to transform * \param[in] transform XSL name * \param[in] to_logs If false, certain validation errors will be sent to * stderr rather than logged * * \return Transformed XML on success, otherwise NULL */ static xmlNode * apply_transformation(const xmlNode *xml, const char *transform, gboolean to_logs) { char *xform = NULL; xmlNode *out = NULL; xmlDocPtr res = NULL; xsltStylesheet *xslt = NULL; xform = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_xslt, transform); /* for capturing, e.g., what's emitted via */ if (to_logs) { xsltSetGenericErrorFunc(NULL, cib_upgrade_err); } else { xsltSetGenericErrorFunc(&crm_log_level, cib_upgrade_err); } xslt = xsltParseStylesheetFile((const xmlChar *) xform); CRM_CHECK(xslt != NULL, goto cleanup); /* Caller allocates private data for final result document. Intermediate * result documents are temporary and don't need private data. */ res = xsltApplyStylesheet(xslt, xml->doc, NULL); CRM_CHECK(res != NULL, goto cleanup); xsltSetGenericErrorFunc(NULL, NULL); /* restore default one */ out = xmlDocGetRootElement(res); cleanup: if (xslt) { xsltFreeStylesheet(xslt); } free(xform); return out; } /*! * \internal * \brief Perform all transformations needed to upgrade XML to next schema * * \param[in] input_xml XML to transform * \param[in] schema_index Index of schema that successfully validates * \p original_xml * \param[in] to_logs If false, certain validation errors will be sent to * stderr rather than logged * * \return XML result of schema transforms if successful, otherwise NULL */ static xmlNode * apply_upgrade(const xmlNode *input_xml, int schema_index, gboolean to_logs) { pcmk__schema_t *schema = g_list_nth_data(known_schemas, schema_index); pcmk__schema_t *upgraded_schema = g_list_nth_data(known_schemas, schema_index + 1); xmlNode *old_xml = NULL; xmlNode *new_xml = NULL; xmlRelaxNGValidityErrorFunc error_handler = NULL; pcmk__assert((schema != NULL) && (upgraded_schema != NULL)); if (to_logs) { error_handler = (xmlRelaxNGValidityErrorFunc) xml_log; } for (GList *iter = schema->transforms; iter != NULL; iter = iter->next) { const struct dirent *entry = iter->data; const char *transform = entry->d_name; crm_debug("Upgrading schema from %s to %s: applying XSL transform %s", schema->name, upgraded_schema->name, transform); new_xml = apply_transformation(input_xml, transform, to_logs); pcmk__xml_free(old_xml); if (new_xml == NULL) { crm_err("XSL transform %s failed, aborting upgrade", transform); return NULL; } input_xml = new_xml; old_xml = new_xml; } // Final result document from upgrade pipeline needs private data pcmk__xml_new_private_data((xmlNode *) new_xml->doc); // Ensure result validates with its new schema if (!validate_with(new_xml, upgraded_schema, error_handler, GUINT_TO_POINTER(LOG_ERR))) { crm_err("Schema upgrade from %s to %s failed: " "XSL transform pipeline produced an invalid configuration", schema->name, upgraded_schema->name); crm_log_xml_debug(new_xml, "bad-transform-result"); pcmk__xml_free(new_xml); return NULL; } crm_info("Schema upgrade from %s to %s succeeded", schema->name, upgraded_schema->name); return new_xml; } /*! 
* \internal * \brief Get the schema list entry corresponding to XML configuration * * \param[in] xml CIB XML to check * * \return List entry of schema configured in \p xml */ static GList * get_configured_schema(const xmlNode *xml) { const char *schema_name = pcmk__xe_get(xml, PCMK_XA_VALIDATE_WITH); pcmk__warn_if_schema_deprecated(schema_name); return pcmk__get_schema(schema_name); } /*! * \brief Update CIB XML to latest schema that validates it * * \param[in,out] xml XML to update (may be freed and replaced * after being transformed) * \param[in] max_schema_name If not NULL, do not update \p xml to any * schema later than this one * \param[in] transform If false, do not update \p xml to any schema * that requires an XSL transform * \param[in] to_logs If false, certain validation errors will be * sent to stderr rather than logged * * \return Standard Pacemaker return code */ int pcmk__update_schema(xmlNode **xml, const char *max_schema_name, bool transform, bool to_logs) { int max_stable_schemas = xml_latest_schema_index(); int max_schema_index = 0; int rc = pcmk_rc_ok; GList *entry = NULL; pcmk__schema_t *best_schema = NULL; pcmk__schema_t *original_schema = NULL; xmlRelaxNGValidityErrorFunc error_handler = to_logs ? (xmlRelaxNGValidityErrorFunc) xml_log : NULL; CRM_CHECK((xml != NULL) && (*xml != NULL) && ((*xml)->doc != NULL), return EINVAL); if (max_schema_name != NULL) { GList *max_entry = pcmk__get_schema(max_schema_name); if (max_entry != NULL) { pcmk__schema_t *max_schema = max_entry->data; max_schema_index = max_schema->schema_index; } } if ((max_schema_index < 1) || (max_schema_index > max_stable_schemas)) { max_schema_index = max_stable_schemas; } entry = get_configured_schema(*xml); if (entry == NULL) { return pcmk_rc_cib_corrupt; } original_schema = entry->data; if (original_schema->schema_index >= max_schema_index) { return pcmk_rc_ok; } for (; entry != NULL; entry = entry->next) { pcmk__schema_t *current_schema = entry->data; xmlNode *upgrade = NULL; if (current_schema->schema_index > max_schema_index) { break; } if (!validate_with(*xml, current_schema, error_handler, GUINT_TO_POINTER(LOG_ERR))) { crm_debug("Schema %s does not validate", current_schema->name); if (best_schema != NULL) { /* we've satisfied the validation, no need to check further */ break; } rc = pcmk_rc_schema_validation; continue; // Try again with the next higher schema } crm_debug("Schema %s validates", current_schema->name); rc = pcmk_rc_ok; best_schema = current_schema; if (current_schema->schema_index == max_schema_index) { break; // No further transformations possible } if (!transform || (current_schema->transforms == NULL) || validate_with_silent(*xml, entry->next->data)) { /* The next schema either doesn't require a transform or validates * successfully even without the transform. Skip the transform and * try the next schema with the same XML. */ continue; } upgrade = apply_upgrade(*xml, current_schema->schema_index, to_logs); if (upgrade == NULL) { /* The transform failed, so this schema can't be used. Later * schemas are unlikely to validate, but try anyway until we * run out of options. */ rc = pcmk_rc_transform_failed; } else { best_schema = current_schema; pcmk__xml_free(*xml); *xml = upgrade; } } if ((best_schema != NULL) && (best_schema->schema_index > original_schema->schema_index)) { crm_info("%s the configuration schema to %s", (transform? 
"Transformed" : "Upgraded"), best_schema->name); crm_xml_add(*xml, PCMK_XA_VALIDATE_WITH, best_schema->name); } return rc; } int pcmk_update_configured_schema(xmlNode **xml) { return pcmk__update_configured_schema(xml, true); } /*! * \brief Update XML from its configured schema to the latest major series * * \param[in,out] xml XML to update * \param[in] to_logs If false, certain validation errors will be * sent to stderr rather than logged * * \return Standard Pacemaker return code */ int pcmk__update_configured_schema(xmlNode **xml, bool to_logs) { pcmk__schema_t *x_0_schema = pcmk__find_x_0_schema()->data; pcmk__schema_t *original_schema = NULL; GList *entry = NULL; if (xml == NULL) { return EINVAL; } entry = get_configured_schema(*xml); if (entry == NULL) { return pcmk_rc_cib_corrupt; } original_schema = entry->data; if (original_schema->schema_index < x_0_schema->schema_index) { // Current configuration schema is not acceptable, try to update xmlNode *converted = NULL; const char *new_schema_name = NULL; pcmk__schema_t *schema = NULL; entry = NULL; converted = pcmk__xml_copy(NULL, *xml); if (pcmk__update_schema(&converted, NULL, true, to_logs) == pcmk_rc_ok) { new_schema_name = pcmk__xe_get(converted, PCMK_XA_VALIDATE_WITH); entry = pcmk__get_schema(new_schema_name); } schema = (entry == NULL)? NULL : entry->data; if ((schema == NULL) || (schema->schema_index < x_0_schema->schema_index)) { // Updated configuration schema is still not acceptable if ((schema == NULL) || (schema->schema_index < original_schema->schema_index)) { // We couldn't validate any schema at all if (to_logs) { pcmk__config_err("Cannot upgrade configuration (claiming " "%s schema) to at least %s because it " "does not validate with any schema from " "%s to the latest", original_schema->name, x_0_schema->name, original_schema->name); } else { fprintf(stderr, "Cannot upgrade configuration (claiming " "%s schema) to at least %s because it " "does not validate with any schema from " "%s to the latest\n", original_schema->name, x_0_schema->name, original_schema->name); } } else { // We updated configuration successfully, but still too low if (to_logs) { pcmk__config_err("Cannot upgrade configuration (claiming " "%s schema) to at least %s because it " "would not upgrade past %s", original_schema->name, x_0_schema->name, pcmk__s(new_schema_name, "unspecified version")); } else { fprintf(stderr, "Cannot upgrade configuration (claiming " "%s schema) to at least %s because it " "would not upgrade past %s\n", original_schema->name, x_0_schema->name, pcmk__s(new_schema_name, "unspecified version")); } } pcmk__xml_free(converted); converted = NULL; return pcmk_rc_transform_failed; } else { // Updated configuration schema is acceptable pcmk__xml_free(*xml); *xml = converted; if (schema->schema_index < xml_latest_schema_index()) { if (to_logs) { pcmk__config_warn("Configuration with %s schema was " "internally upgraded to acceptable (but " "not most recent) %s", original_schema->name, schema->name); } } else if (to_logs) { crm_info("Configuration with %s schema was internally " "upgraded to latest version %s", original_schema->name, schema->name); } } } else if (!to_logs) { pcmk__schema_t *none_schema = NULL; entry = pcmk__get_schema(PCMK_VALUE_NONE); pcmk__assert((entry != NULL) && (entry->data != NULL)); none_schema = entry->data; if (original_schema->schema_index >= none_schema->schema_index) { // @COMPAT the none schema is deprecated since 2.1.8 fprintf(stderr, "Schema validation of configuration is " "disabled (support for " 
PCMK_XA_VALIDATE_WITH " set to \"" PCMK_VALUE_NONE "\" is deprecated" " and will be removed in a future release)\n"); } } return pcmk_rc_ok; } /*! * \internal * \brief Return a list of all schema files and any associated XSLT files * later than the given one * \brief Return a list of all schema versions later than the given one * * \param[in] schema The schema to compare against (for example, * "pacemaker-3.1.rng" or "pacemaker-3.1") * * \note The caller is responsible for freeing both the returned list and * the elements of the list */ GList * pcmk__schema_files_later_than(const char *name) { GList *lst = NULL; pcmk__schema_version_t ver; if (!version_from_filename(name, &ver)) { return lst; } for (GList *iter = g_list_nth(known_schemas, xml_latest_schema_index()); iter != NULL; iter = iter->prev) { pcmk__schema_t *schema = iter->data; if (schema_cmp(ver, schema->version) != -1) { continue; } for (GList *iter2 = g_list_last(schema->transforms); iter2 != NULL; iter2 = iter2->prev) { const struct dirent *entry = iter2->data; lst = g_list_prepend(lst, pcmk__str_copy(entry->d_name)); } lst = g_list_prepend(lst, crm_strdup_printf("%s.rng", schema->name)); } return lst; } static void append_href(xmlNode *xml, void *user_data) { GList **list = user_data; - char *href = crm_element_value_copy(xml, "href"); + char *href = pcmk__xe_get_copy(xml, "href"); if (href == NULL) { return; } *list = g_list_prepend(*list, href); } static void external_refs_in_schema(GList **list, const char *contents) { /* local-name()= is needed to ignore the xmlns= setting at the top of * the XML file. Otherwise, the xpath query will always return nothing. */ const char *search = "//*[local-name()='externalRef'] | //*[local-name()='include']"; xmlNode *xml = pcmk__xml_parse(contents); pcmk__xpath_foreach_result(xml->doc, search, append_href, list); pcmk__xml_free(xml); } static int read_file_contents(const char *file, char **contents) { int rc = pcmk_rc_ok; char *path = NULL; if (pcmk__ends_with(file, ".rng")) { path = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_rng, file); } else { path = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_xslt, file); } rc = pcmk__file_contents(path, contents); free(path); return rc; } static void add_schema_file_to_xml(xmlNode *parent, const char *file, GList **already_included) { char *contents = NULL; char *path = NULL; xmlNode *file_node = NULL; GList *includes = NULL; int rc = pcmk_rc_ok; /* If we already included this file, don't do so again. */ if (g_list_find_custom(*already_included, file, (GCompareFunc) strcmp) != NULL) { return; } /* Ensure whatever file we were given has a suffix we know about. If not, * just assume it's an RNG file. */ if (!pcmk__ends_with(file, ".rng") && !pcmk__ends_with(file, ".xsl")) { path = crm_strdup_printf("%s.rng", file); } else { path = pcmk__str_copy(file); } rc = read_file_contents(path, &contents); if (rc != pcmk_rc_ok || contents == NULL) { crm_warn("Could not read schema file %s: %s", file, pcmk_rc_str(rc)); free(path); return; } /* Create a new node with the contents of the file * as a CDATA block underneath it. */ file_node = pcmk__xe_create(parent, PCMK__XE_FILE); crm_xml_add(file_node, PCMK_XA_PATH, path); *already_included = g_list_prepend(*already_included, path); xmlAddChild(file_node, xmlNewCDataBlock(parent->doc, (const xmlChar *) contents, strlen(contents))); /* Scan the file for any or nodes and build up * a list of the files they reference. 
*/ external_refs_in_schema(&includes, contents); /* For each referenced file, recurse to add it (and potentially anything it * references, ...) to the XML. */ for (GList *iter = includes; iter != NULL; iter = iter->next) { add_schema_file_to_xml(parent, iter->data, already_included); } free(contents); g_list_free_full(includes, free); } /*! * \internal * \brief Add an XML schema file and all the files it references as children * of a given XML node * * \param[in,out] parent The parent XML node * \param[in] name The schema version to compare against * (for example, "pacemaker-3.1" or "pacemaker-3.1.rng") * \param[in,out] already_included A list of names that have already been added * to the parent node. * * \note The caller is responsible for freeing both the returned list and * the elements of the list */ void pcmk__build_schema_xml_node(xmlNode *parent, const char *name, GList **already_included) { xmlNode *schema_node = pcmk__xe_create(parent, PCMK__XA_SCHEMA); crm_xml_add(schema_node, PCMK_XA_VERSION, name); add_schema_file_to_xml(schema_node, name, already_included); if (schema_node->children == NULL) { // Not needed if empty. May happen if name was invalid, for example. pcmk__xml_free(schema_node); } } /*! * \internal * \brief Return the directory containing any extra schema files that a * Pacemaker Remote node fetched from the cluster */ const char * pcmk__remote_schema_dir(void) { const char *dir = pcmk__env_option(PCMK__ENV_REMOTE_SCHEMA_DIRECTORY); if (pcmk__str_empty(dir)) { return PCMK__REMOTE_SCHEMA_DIR; } return dir; } /*! * \internal * \brief Warn if a given validation schema is deprecated * * \param[in] Schema name to check */ void pcmk__warn_if_schema_deprecated(const char *schema) { /* @COMPAT Disabling validation is deprecated since 2.1.8, but * resource-agents' ocf-shellfuncs (at least as of 4.15.1) uses it */ if (pcmk__str_eq(schema, PCMK_VALUE_NONE, pcmk__str_none)) { pcmk__config_warn("Support for " PCMK_XA_VALIDATE_WITH "='%s' is " "deprecated and will be removed in a future release " "without the possibility of upgrades (manually edit " "to use a supported schema)", schema); } } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include gboolean cli_config_update(xmlNode **xml, int *best_version, gboolean to_logs) { int rc = pcmk__update_configured_schema(xml, to_logs); if (best_version != NULL) { const char *name = pcmk__xe_get(*xml, PCMK_XA_VALIDATE_WITH); if (name == NULL) { *best_version = -1; } else { GList *entry = pcmk__get_schema(name); pcmk__schema_t *schema = (entry == NULL)? NULL : entry->data; *best_version = (schema == NULL)? -1 : schema->schema_index; } } return (rc == pcmk_rc_ok)? TRUE: FALSE; } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/common/xml.c b/lib/common/xml.c index 57f71a16a9..6c0ba49479 100644 --- a/lib/common/xml.c +++ b/lib/common/xml.c @@ -1,1904 +1,1904 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include // uint32_t #include #include #include #include // stat(), S_ISREG, etc. #include #include // gboolean, GString #include // xmlCleanupParser() #include // xmlNode, etc. #include // xmlChar, xmlGetUTF8Char() #include #include #include // PCMK__XML_LOG_BASE, etc. #include "crmcommon_private.h" //! 
libxml2 supports only XML version 1.0, at least as of libxml2-2.12.5 #define XML_VERSION ((const xmlChar *) "1.0") /*! * \internal * \brief Get a string representation of an XML element type for logging * * \param[in] type XML element type * * \return String representation of \p type */ const char * pcmk__xml_element_type_text(xmlElementType type) { static const char *const element_type_names[] = { [XML_ELEMENT_NODE] = "element", [XML_ATTRIBUTE_NODE] = "attribute", [XML_TEXT_NODE] = "text", [XML_CDATA_SECTION_NODE] = "CDATA section", [XML_ENTITY_REF_NODE] = "entity reference", [XML_ENTITY_NODE] = "entity", [XML_PI_NODE] = "PI", [XML_COMMENT_NODE] = "comment", [XML_DOCUMENT_NODE] = "document", [XML_DOCUMENT_TYPE_NODE] = "document type", [XML_DOCUMENT_FRAG_NODE] = "document fragment", [XML_NOTATION_NODE] = "notation", [XML_HTML_DOCUMENT_NODE] = "HTML document", [XML_DTD_NODE] = "DTD", [XML_ELEMENT_DECL] = "element declaration", [XML_ATTRIBUTE_DECL] = "attribute declaration", [XML_ENTITY_DECL] = "entity declaration", [XML_NAMESPACE_DECL] = "namespace declaration", [XML_XINCLUDE_START] = "XInclude start", [XML_XINCLUDE_END] = "XInclude end", }; // Assumes the numeric values of the indices are in ascending order if ((type < XML_ELEMENT_NODE) || (type > XML_XINCLUDE_END)) { return "unrecognized type"; } return element_type_names[type]; } /*! * \internal * \brief Apply a function to each XML node in a tree (pre-order, depth-first) * * \param[in,out] xml XML tree to traverse * \param[in,out] fn Function to call for each node (returns \c true to * continue traversing the tree or \c false to stop) * \param[in,out] user_data Argument to \p fn * * \return \c false if any \p fn call returned \c false, or \c true otherwise * * \note This function is recursive. */ bool pcmk__xml_tree_foreach(xmlNode *xml, bool (*fn)(xmlNode *, void *), void *user_data) { if (xml == NULL) { return true; } if (!fn(xml, user_data)) { return false; } for (xml = pcmk__xml_first_child(xml); xml != NULL; xml = pcmk__xml_next(xml)) { if (!pcmk__xml_tree_foreach(xml, fn, user_data)) { return false; } } return true; } void pcmk__xml_set_parent_flags(xmlNode *xml, uint64_t flags) { for (; xml != NULL; xml = xml->parent) { xml_node_private_t *nodepriv = xml->_private; if (nodepriv != NULL) { pcmk__set_xml_flags(nodepriv, flags); } } } /*! * \internal * \brief Set flags for an XML document * * \param[in,out] doc XML document * \param[in] flags Group of enum pcmk__xml_flags */ void pcmk__xml_doc_set_flags(xmlDoc *doc, uint32_t flags) { if (doc != NULL) { xml_doc_private_t *docpriv = doc->_private; pcmk__set_xml_flags(docpriv, flags); } } /*! * \internal * \brief Check whether the given flags are set for an XML document * * \param[in] doc XML document to check * \param[in] flags Group of enum pcmk__xml_flags * * \return \c true if all of \p flags are set for \p doc, or \c false otherwise */ bool pcmk__xml_doc_all_flags_set(const xmlDoc *doc, uint32_t flags) { if (doc != NULL) { xml_doc_private_t *docpriv = doc->_private; return (docpriv != NULL) && pcmk_all_flags_set(docpriv->flags, flags); } return false; } // Mark document, element, and all element's parents as changed void pcmk__mark_xml_node_dirty(xmlNode *xml) { if (xml == NULL) { return; } pcmk__xml_doc_set_flags(xml->doc, pcmk__xf_dirty); pcmk__xml_set_parent_flags(xml, pcmk__xf_dirty); } /*! 
* \internal * \brief Clear flags on an XML node * * \param[in,out] xml XML node whose flags to reset * \param[in,out] user_data Ignored * * \return \c true (to continue traversing the tree) * * \note This is compatible with \c pcmk__xml_tree_foreach(). */ bool pcmk__xml_reset_node_flags(xmlNode *xml, void *user_data) { xml_node_private_t *nodepriv = xml->_private; if (nodepriv != NULL) { nodepriv->flags = pcmk__xf_none; } return true; } /*! * \internal * \brief Set the \c pcmk__xf_dirty and \c pcmk__xf_created flags on an XML node * * \param[in,out] xml Node whose flags to set * \param[in] user_data Ignored * * \return \c true (to continue traversing the tree) * * \note This is compatible with \c pcmk__xml_tree_foreach(). */ static bool mark_xml_dirty_created(xmlNode *xml, void *user_data) { xml_node_private_t *nodepriv = xml->_private; if (nodepriv != NULL) { pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_created); } return true; } /*! * \internal * \brief Mark an XML tree as dirty and created, and mark its parents dirty * * Also mark the document dirty. * * \param[in,out] xml Tree to mark as dirty and created */ static void mark_xml_tree_dirty_created(xmlNode *xml) { pcmk__assert(xml != NULL); if (!pcmk__xml_doc_all_flags_set(xml->doc, pcmk__xf_tracking)) { // Tracking is disabled for entire document return; } // Mark all parents and document dirty pcmk__mark_xml_node_dirty(xml); pcmk__xml_tree_foreach(xml, mark_xml_dirty_created, NULL); } // Free an XML object previously marked as deleted static void free_deleted_object(void *data) { if(data) { pcmk__deleted_xml_t *deleted_obj = data; g_free(deleted_obj->path); free(deleted_obj); } } // Free and NULL user, ACLs, and deleted objects in an XML node's private data static void reset_xml_private_data(xml_doc_private_t *docpriv) { if (docpriv != NULL) { pcmk__assert(docpriv->check == PCMK__XML_DOC_PRIVATE_MAGIC); pcmk__str_update(&(docpriv->acl_user), NULL); if (docpriv->acls != NULL) { pcmk__free_acls(docpriv->acls); docpriv->acls = NULL; } if(docpriv->deleted_objs) { g_list_free_full(docpriv->deleted_objs, free_deleted_object); docpriv->deleted_objs = NULL; } } } /*! * \internal * \brief Allocate and initialize private data for an XML node * * \param[in,out] node XML node whose private data to initialize * \param[in] user_data Ignored * * \return \c true (to continue traversing the tree) * * \note This is compatible with \c pcmk__xml_tree_foreach(). 
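 *
 * \note For example, pcmk__xml_new_private_data() below applies this to an
 *       entire tree with:
 * \code
 * pcmk__xml_tree_foreach(xml, new_private_data, NULL);
 * \endcode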
*/ static bool new_private_data(xmlNode *node, void *user_data) { bool tracking = false; CRM_CHECK(node != NULL, return true); if (node->_private != NULL) { return true; } tracking = pcmk__xml_doc_all_flags_set(node->doc, pcmk__xf_tracking); switch (node->type) { case XML_DOCUMENT_NODE: { xml_doc_private_t *docpriv = pcmk__assert_alloc(1, sizeof(xml_doc_private_t)); docpriv->check = PCMK__XML_DOC_PRIVATE_MAGIC; node->_private = docpriv; } break; case XML_ELEMENT_NODE: case XML_ATTRIBUTE_NODE: case XML_COMMENT_NODE: { xml_node_private_t *nodepriv = pcmk__assert_alloc(1, sizeof(xml_node_private_t)); nodepriv->check = PCMK__XML_NODE_PRIVATE_MAGIC; node->_private = nodepriv; if (tracking) { pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_created); } for (xmlAttr *iter = pcmk__xe_first_attr(node); iter != NULL; iter = iter->next) { new_private_data((xmlNode *) iter, user_data); } } break; case XML_TEXT_NODE: case XML_DTD_NODE: case XML_CDATA_SECTION_NODE: return true; default: CRM_LOG_ASSERT(node->type == XML_ELEMENT_NODE); return true; } if (tracking) { pcmk__mark_xml_node_dirty(node); } return true; } /*! * \internal * \brief Free private data for an XML node * * \param[in,out] node XML node whose private data to free * \param[in] user_data Ignored * * \return \c true (to continue traversing the tree) * * \note This is compatible with \c pcmk__xml_tree_foreach(). */ static bool free_private_data(xmlNode *node, void *user_data) { CRM_CHECK(node != NULL, return true); if (node->_private == NULL) { return true; } if (node->type == XML_DOCUMENT_NODE) { reset_xml_private_data((xml_doc_private_t *) node->_private); } else { xml_node_private_t *nodepriv = node->_private; pcmk__assert(nodepriv->check == PCMK__XML_NODE_PRIVATE_MAGIC); for (xmlAttr *iter = pcmk__xe_first_attr(node); iter != NULL; iter = iter->next) { free_private_data((xmlNode *) iter, user_data); } } free(node->_private); node->_private = NULL; return true; } /*! * \internal * \brief Allocate and initialize private data recursively for an XML tree * * \param[in,out] node XML node whose private data to initialize */ void pcmk__xml_new_private_data(xmlNode *xml) { pcmk__xml_tree_foreach(xml, new_private_data, NULL); } /*! * \internal * \brief Free private data recursively for an XML tree * * \param[in,out] node XML node whose private data to free */ void pcmk__xml_free_private_data(xmlNode *xml) { pcmk__xml_tree_foreach(xml, free_private_data, NULL); } /*! * \internal * \brief Return ordinal position of an XML node among its siblings * * \param[in] xml XML node to check * \param[in] ignore_if_set Don't count siblings with this flag set * * \return Ordinal position of \p xml (starting with 0) */ int pcmk__xml_position(const xmlNode *xml, enum pcmk__xml_flags ignore_if_set) { int position = 0; for (const xmlNode *cIter = xml; cIter->prev; cIter = cIter->prev) { xml_node_private_t *nodepriv = ((xmlNode*)cIter->prev)->_private; if (!pcmk_is_set(nodepriv->flags, ignore_if_set)) { position++; } } return position; } /*! * \internal * \brief Remove all attributes marked as deleted from an XML node * * \param[in,out] xml XML node whose deleted attributes to remove * \param[in,out] user_data Ignored * * \return \c true (to continue traversing the tree) * * \note This is compatible with \c pcmk__xml_tree_foreach(). */ static bool commit_attr_deletions(xmlNode *xml, void *user_data) { pcmk__xml_reset_node_flags(xml, NULL); pcmk__xe_remove_matching_attrs(xml, true, pcmk__marked_as_deleted, NULL); return true; } /*! 
* \internal * \brief Finalize all pending changes to an XML document and reset private data * * Clear the ACL user and all flags, unpacked ACLs, and deleted node records for * the document; clear all flags on each node in the tree; and delete any * attributes that are marked for deletion. * * \param[in,out] doc XML document * * \note When change tracking is enabled, "deleting" an attribute simply marks * it for deletion (using \c pcmk__xf_deleted) until changes are * committed. Freeing a node (using \c pcmk__xml_free()) adds a deleted * node record (\c pcmk__deleted_xml_t) to the node's document before * freeing it. * \note This function clears all flags, not just flags that indicate changes. * In particular, note that it clears the \c pcmk__xf_tracking flag, thus * disabling tracking. */ void pcmk__xml_commit_changes(xmlDoc *doc) { xml_doc_private_t *docpriv = NULL; if (doc == NULL) { return; } docpriv = doc->_private; if (docpriv == NULL) { return; } if (pcmk_is_set(docpriv->flags, pcmk__xf_dirty)) { pcmk__xml_tree_foreach(xmlDocGetRootElement(doc), commit_attr_deletions, NULL); } reset_xml_private_data(docpriv); docpriv->flags = pcmk__xf_none; } /*! * \internal * \brief Create a new XML document * * \return Newly allocated XML document (guaranteed not to be \c NULL) * * \note The caller is responsible for freeing the return value using * \c pcmk__xml_free_doc(). */ xmlDoc * pcmk__xml_new_doc(void) { xmlDoc *doc = xmlNewDoc(XML_VERSION); pcmk__mem_assert(doc); pcmk__xml_new_private_data((xmlNode *) doc); return doc; } /*! * \internal * \brief Free a new XML document * * \param[in,out] doc XML document to free */ void pcmk__xml_free_doc(xmlDoc *doc) { if (doc != NULL) { pcmk__xml_free_private_data((xmlNode *) doc); xmlFreeDoc(doc); } } /*! * \internal * \brief Check whether the first character of a string is an XML NameStartChar * * See https://www.w3.org/TR/xml/#NT-NameStartChar. * * This is almost identical to libxml2's \c xmlIsDocNameStartChar(), but they * don't expose it as part of the public API. * * \param[in] utf8 UTF-8 encoded string * \param[out] len If not \c NULL, where to store size in bytes of first * character in \p utf8 * * \return \c true if \p utf8 begins with a valid XML NameStartChar, or \c false * otherwise */ bool pcmk__xml_is_name_start_char(const char *utf8, int *len) { int c = 0; int local_len = 0; if (len == NULL) { len = &local_len; } /* xmlGetUTF8Char() abuses the len argument. At call time, it must be set to * "the minimum number of bytes present in the sequence... to assure the * next character is completely contained within the sequence." It's similar * to the "n" in the strn*() functions. However, this doesn't make any sense * for null-terminated strings, and there's no value that indicates "keep * going until '\0'." So we set it to 4, the max number of bytes in a UTF-8 * character. * * At return, it's set to the actual number of bytes in the char, or 0 on * error. */ *len = 4; // Note: xmlGetUTF8Char() assumes a 32-bit int c = xmlGetUTF8Char((const xmlChar *) utf8, len); if (c < 0) { GString *buf = g_string_sized_new(32); for (int i = 0; (i < 4) && (utf8[i] != '\0'); i++) { g_string_append_printf(buf, " 0x%.2X", utf8[i]); } crm_info("Invalid UTF-8 character (bytes:%s)", (pcmk__str_empty(buf->str)? 
" " : buf->str)); g_string_free(buf, TRUE); return false; } return (c == '_') || (c == ':') || ((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) || ((c >= 0xC0) && (c <= 0xD6)) || ((c >= 0xD8) && (c <= 0xF6)) || ((c >= 0xF8) && (c <= 0x2FF)) || ((c >= 0x370) && (c <= 0x37D)) || ((c >= 0x37F) && (c <= 0x1FFF)) || ((c >= 0x200C) && (c <= 0x200D)) || ((c >= 0x2070) && (c <= 0x218F)) || ((c >= 0x2C00) && (c <= 0x2FEF)) || ((c >= 0x3001) && (c <= 0xD7FF)) || ((c >= 0xF900) && (c <= 0xFDCF)) || ((c >= 0xFDF0) && (c <= 0xFFFD)) || ((c >= 0x10000) && (c <= 0xEFFFF)); } /*! * \internal * \brief Check whether the first character of a string is an XML NameChar * * See https://www.w3.org/TR/xml/#NT-NameChar. * * This is almost identical to libxml2's \c xmlIsDocNameChar(), but they don't * expose it as part of the public API. * * \param[in] utf8 UTF-8 encoded string * \param[out] len If not \c NULL, where to store size in bytes of first * character in \p utf8 * * \return \c true if \p utf8 begins with a valid XML NameChar, or \c false * otherwise */ bool pcmk__xml_is_name_char(const char *utf8, int *len) { int c = 0; int local_len = 0; if (len == NULL) { len = &local_len; } // See comment regarding len in pcmk__xml_is_name_start_char() *len = 4; // Note: xmlGetUTF8Char() assumes a 32-bit int c = xmlGetUTF8Char((const xmlChar *) utf8, len); if (c < 0) { GString *buf = g_string_sized_new(32); for (int i = 0; (i < 4) && (utf8[i] != '\0'); i++) { g_string_append_printf(buf, " 0x%.2X", utf8[i]); } crm_info("Invalid UTF-8 character (bytes:%s)", (pcmk__str_empty(buf->str)? " " : buf->str)); g_string_free(buf, TRUE); return false; } return ((c >= 'a') && (c <= 'z')) || ((c >= 'A') && (c <= 'Z')) || ((c >= '0') && (c <= '9')) || (c == '_') || (c == ':') || (c == '-') || (c == '.') || (c == 0xB7) || ((c >= 0xC0) && (c <= 0xD6)) || ((c >= 0xD8) && (c <= 0xF6)) || ((c >= 0xF8) && (c <= 0x2FF)) || ((c >= 0x300) && (c <= 0x36F)) || ((c >= 0x370) && (c <= 0x37D)) || ((c >= 0x37F) && (c <= 0x1FFF)) || ((c >= 0x200C) && (c <= 0x200D)) || ((c >= 0x203F) && (c <= 0x2040)) || ((c >= 0x2070) && (c <= 0x218F)) || ((c >= 0x2C00) && (c <= 0x2FEF)) || ((c >= 0x3001) && (c <= 0xD7FF)) || ((c >= 0xF900) && (c <= 0xFDCF)) || ((c >= 0xFDF0) && (c <= 0xFFFD)) || ((c >= 0x10000) && (c <= 0xEFFFF)); } /*! * \internal * \brief Sanitize a string so it is usable as an XML ID * * An ID must match the Name production as defined here: * https://www.w3.org/TR/xml/#NT-Name. * * Convert an invalid start character to \c '_'. Convert an invalid character * after the start character to \c '.'. * * \param[in,out] id String to sanitize */ void pcmk__xml_sanitize_id(char *id) { bool valid = true; int len = 0; // If id is empty or NULL, there's no way to make it a valid XML ID pcmk__assert(!pcmk__str_empty(id)); /* @TODO Suppose there are two strings and each has an invalid ID character * in the same position. The strings are otherwise identical. Both strings * will be sanitized to the same valid ID, which is incorrect. * * The caller is responsible for ensuring the sanitized ID does not already * exist in a given XML document before using it, if uniqueness is desired. 
*/ valid = pcmk__xml_is_name_start_char(id, &len); CRM_CHECK(len > 0, return); // UTF-8 encoding error if (!valid) { *id = '_'; for (int i = 1; i < len; i++) { id[i] = '.'; } } for (id += len; *id != '\0'; id += len) { valid = pcmk__xml_is_name_char(id, &len); CRM_CHECK(len > 0, return); // UTF-8 encoding error if (!valid) { for (int i = 0; i < len; i++) { id[i] = '.'; } } } } /*! * \internal * \brief Free an XML tree without ACL checks or change tracking * * \param[in,out] xml XML node to free */ void pcmk__xml_free_node(xmlNode *xml) { pcmk__xml_free_private_data(xml); xmlUnlinkNode(xml); xmlFreeNode(xml); } /*! * \internal * \brief Free an XML tree if ACLs allow; track deletion if tracking is enabled * * If \p node is the root of its document, free the entire document. * * \param[in,out] node XML node to free * \param[in] position Position of \p node among its siblings for change * tracking (negative to calculate automatically if * needed) * * \return Standard Pacemaker return code */ static int free_xml_with_position(xmlNode *node, int position) { xmlDoc *doc = NULL; xml_node_private_t *nodepriv = NULL; if (node == NULL) { return pcmk_rc_ok; } doc = node->doc; nodepriv = node->_private; if ((doc != NULL) && (xmlDocGetRootElement(doc) == node)) { /* @TODO Should we check ACLs first? Otherwise it seems like we could * free the root element without write permission. */ pcmk__xml_free_doc(doc); return pcmk_rc_ok; } if (!pcmk__check_acl(node, NULL, pcmk__xf_acl_write)) { pcmk__if_tracing( { GString *xpath = pcmk__element_xpath(node); qb_log_from_external_source(__func__, __FILE__, "Cannot remove %s %x", LOG_TRACE, __LINE__, 0, xpath->str, nodepriv->flags); g_string_free(xpath, TRUE); }, {} ); return EACCES; } if (pcmk__xml_doc_all_flags_set(node->doc, pcmk__xf_tracking) && !pcmk_is_set(nodepriv->flags, pcmk__xf_created)) { xml_doc_private_t *docpriv = doc->_private; GString *xpath = pcmk__element_xpath(node); if (xpath != NULL) { pcmk__deleted_xml_t *deleted_obj = NULL; crm_trace("Deleting %s %p from %p", xpath->str, node, doc); deleted_obj = pcmk__assert_alloc(1, sizeof(pcmk__deleted_xml_t)); deleted_obj->path = g_string_free(xpath, FALSE); deleted_obj->position = -1; // Record the position only for XML comments for now if (node->type == XML_COMMENT_NODE) { if (position >= 0) { deleted_obj->position = position; } else { deleted_obj->position = pcmk__xml_position(node, pcmk__xf_skip); } } docpriv->deleted_objs = g_list_append(docpriv->deleted_objs, deleted_obj); pcmk__xml_doc_set_flags(node->doc, pcmk__xf_dirty); } } pcmk__xml_free_node(node); return pcmk_rc_ok; } /*! * \internal * \brief Free an XML tree if ACLs allow; track deletion if tracking is enabled * * If \p xml is the root of its document, free the entire document. * * \param[in,out] xml XML node to free */ void pcmk__xml_free(xmlNode *xml) { free_xml_with_position(xml, -1); } /*! 
* \internal * \brief Make a deep copy of an XML node under a given parent * * \param[in,out] parent XML element that will be the copy's parent (\c NULL * to create a new XML document with the copy as root) * \param[in] src XML node to copy * * \return Deep copy of \p src, or \c NULL if \p src is \c NULL */ xmlNode * pcmk__xml_copy(xmlNode *parent, xmlNode *src) { xmlNode *copy = NULL; if (src == NULL) { return NULL; } if (parent == NULL) { xmlDoc *doc = NULL; // The copy will be the root element of a new document pcmk__assert(src->type == XML_ELEMENT_NODE); doc = pcmk__xml_new_doc(); copy = xmlDocCopyNode(src, doc, 1); pcmk__mem_assert(copy); xmlDocSetRootElement(doc, copy); } else { copy = xmlDocCopyNode(src, parent->doc, 1); pcmk__mem_assert(copy); xmlAddChild(parent, copy); } pcmk__xml_new_private_data(copy); return copy; } /*! * \internal * \brief Remove XML text nodes from specified XML and all its children * * \param[in,out] xml XML to strip text from */ void pcmk__strip_xml_text(xmlNode *xml) { xmlNode *iter = xml->children; while (iter) { xmlNode *next = iter->next; switch (iter->type) { case XML_TEXT_NODE: pcmk__xml_free_node(iter); break; case XML_ELEMENT_NODE: /* Search it */ pcmk__strip_xml_text(iter); break; default: /* Leave it */ break; } iter = next; } } /*! * \internal * \brief Check whether a string has XML special characters that must be escaped * * See \c pcmk__xml_escape() and \c pcmk__xml_escape_type for more details. * * \param[in] text String to check * \param[in] type Type of escaping * * \return \c true if \p text has special characters that need to be escaped, or * \c false otherwise */ bool pcmk__xml_needs_escape(const char *text, enum pcmk__xml_escape_type type) { if (text == NULL) { return false; } while (*text != '\0') { switch (type) { case pcmk__xml_escape_text: switch (*text) { case '<': case '>': case '&': return true; case '\n': case '\t': break; default: if (g_ascii_iscntrl(*text)) { return true; } break; } break; case pcmk__xml_escape_attr: switch (*text) { case '<': case '>': case '&': case '"': return true; default: if (g_ascii_iscntrl(*text)) { return true; } break; } break; case pcmk__xml_escape_attr_pretty: switch (*text) { case '\n': case '\r': case '\t': case '"': return true; default: break; } break; default: // Invalid enum value pcmk__assert(false); break; } text = g_utf8_next_char(text); } return false; } /*! * \internal * \brief Replace special characters with their XML escape sequences * * \param[in] text Text to escape * \param[in] type Type of escaping * * \return Newly allocated string equivalent to \p text but with special * characters replaced with XML escape sequences (or \c NULL if \p text * is \c NULL). If \p text is not \c NULL, the return value is * guaranteed not to be \c NULL. * * \note There are libxml functions that purport to do this: * \c xmlEncodeEntitiesReentrant() and \c xmlEncodeSpecialChars(). * However, their escaping is incomplete. See: * https://discourse.gnome.org/t/intended-use-of-xmlencodeentitiesreentrant-vs-xmlencodespecialchars/19252 * \note The caller is responsible for freeing the return value using * \c g_free(). 
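 *
 * \note Illustrative usage sketch (assuming the usual entity expansions,
 *       e.g. "&lt;" for '<' and "&amp;" for '&'):
 * \code
 * gchar *escaped = pcmk__xml_escape("a < b & c", pcmk__xml_escape_text);
 * // escaped is expected to contain "a &lt; b &amp; c"
 * g_free(escaped);
 * \endcode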
*/ gchar * pcmk__xml_escape(const char *text, enum pcmk__xml_escape_type type) { GString *copy = NULL; if (text == NULL) { return NULL; } copy = g_string_sized_new(strlen(text)); while (*text != '\0') { // Don't escape any non-ASCII characters if ((*text & 0x80) != 0) { size_t bytes = g_utf8_next_char(text) - text; g_string_append_len(copy, text, bytes); text += bytes; continue; } switch (type) { case pcmk__xml_escape_text: switch (*text) { case '<': g_string_append(copy, PCMK__XML_ENTITY_LT); break; case '>': g_string_append(copy, PCMK__XML_ENTITY_GT); break; case '&': g_string_append(copy, PCMK__XML_ENTITY_AMP); break; case '\n': case '\t': g_string_append_c(copy, *text); break; default: if (g_ascii_iscntrl(*text)) { g_string_append_printf(copy, "&#x%.2X;", *text); } else { g_string_append_c(copy, *text); } break; } break; case pcmk__xml_escape_attr: switch (*text) { case '<': g_string_append(copy, PCMK__XML_ENTITY_LT); break; case '>': g_string_append(copy, PCMK__XML_ENTITY_GT); break; case '&': g_string_append(copy, PCMK__XML_ENTITY_AMP); break; case '"': g_string_append(copy, PCMK__XML_ENTITY_QUOT); break; default: if (g_ascii_iscntrl(*text)) { g_string_append_printf(copy, "&#x%.2X;", *text); } else { g_string_append_c(copy, *text); } break; } break; case pcmk__xml_escape_attr_pretty: switch (*text) { case '"': g_string_append(copy, "\\\""); break; case '\n': g_string_append(copy, "\\n"); break; case '\r': g_string_append(copy, "\\r"); break; case '\t': g_string_append(copy, "\\t"); break; default: g_string_append_c(copy, *text); break; } break; default: // Invalid enum value pcmk__assert(false); break; } text = g_utf8_next_char(text); } return g_string_free(copy, FALSE); } /*! * \internal * \brief Add an XML attribute to a node, marked as deleted * * When calculating XML changes, we need to know when an attribute has been * deleted. Add the attribute back to the new XML, so that we can check the * removal against ACLs, and mark it as deleted for later removal after * differences have been calculated. 
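 *
 * (In other words, change tracking is cleared on the document just long
 * enough to restore the old value without recording that restore as a
 * change, and pcmk__xa_remove() then marks the attribute for deletion
 * subject to ACL checks.)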
* * \param[in,out] new_xml XML to modify * \param[in] element Name of XML element that changed (for logging) * \param[in] attr_name Name of attribute that was deleted * \param[in] old_value Value of attribute that was deleted */ static void mark_attr_deleted(xmlNode *new_xml, const char *element, const char *attr_name, const char *old_value) { xml_doc_private_t *docpriv = new_xml->doc->_private; xmlAttr *attr = NULL; xml_node_private_t *nodepriv; /* Restore the old value (without setting dirty flag recursively upwards or * checking ACLs) */ pcmk__clear_xml_flags(docpriv, pcmk__xf_tracking); crm_xml_add(new_xml, attr_name, old_value); pcmk__set_xml_flags(docpriv, pcmk__xf_tracking); // Reset flags (so the attribute doesn't appear as newly created) attr = xmlHasProp(new_xml, (const xmlChar *) attr_name); nodepriv = attr->_private; nodepriv->flags = 0; // Check ACLs and mark restored value for later removal pcmk__xa_remove(attr, false); crm_trace("XML attribute %s=%s was removed from %s", attr_name, old_value, element); } /* * \internal * \brief Check ACLs for a changed XML attribute */ static void mark_attr_changed(xmlNode *new_xml, const char *element, const char *attr_name, const char *old_value) { xml_doc_private_t *docpriv = new_xml->doc->_private; - char *vcopy = crm_element_value_copy(new_xml, attr_name); + char *vcopy = pcmk__xe_get_copy(new_xml, attr_name); crm_trace("XML attribute %s was changed from '%s' to '%s' in %s", attr_name, old_value, vcopy, element); // Restore the original value (without checking ACLs) pcmk__clear_xml_flags(docpriv, pcmk__xf_tracking); crm_xml_add(new_xml, attr_name, old_value); pcmk__set_xml_flags(docpriv, pcmk__xf_tracking); // Change it back to the new value, to check ACLs crm_xml_add(new_xml, attr_name, vcopy); free(vcopy); } /*! * \internal * \brief Mark an XML attribute as having changed position * * \param[in,out] new_xml XML to modify * \param[in] element Name of XML element that changed (for logging) * \param[in,out] old_attr Attribute that moved, in original XML * \param[in,out] new_attr Attribute that moved, in \p new_xml * \param[in] p_old Ordinal position of \p old_attr in original XML * \param[in] p_new Ordinal position of \p new_attr in \p new_xml */ static void mark_attr_moved(xmlNode *new_xml, const char *element, xmlAttr *old_attr, xmlAttr *new_attr, int p_old, int p_new) { xml_node_private_t *nodepriv = new_attr->_private; crm_trace("XML attribute %s moved from position %d to %d in %s", old_attr->name, p_old, p_new, element); // Mark document, element, and all element's parents as changed pcmk__mark_xml_node_dirty(new_xml); // Mark attribute as changed pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_moved); nodepriv = (p_old > p_new)? old_attr->_private : new_attr->_private; pcmk__set_xml_flags(nodepriv, pcmk__xf_skip); } /*! 
* \internal * \brief Calculate differences in all previously existing XML attributes * * \param[in,out] old_xml Original XML to compare * \param[in,out] new_xml New XML to compare */ static void xml_diff_old_attrs(xmlNode *old_xml, xmlNode *new_xml) { xmlAttr *attr_iter = pcmk__xe_first_attr(old_xml); while (attr_iter != NULL) { const char *name = (const char *) attr_iter->name; xmlAttr *old_attr = attr_iter; xmlAttr *new_attr = xmlHasProp(new_xml, attr_iter->name); const char *old_value = pcmk__xml_attr_value(attr_iter); attr_iter = attr_iter->next; if (new_attr == NULL) { mark_attr_deleted(new_xml, (const char *) old_xml->name, name, old_value); } else { xml_node_private_t *nodepriv = new_attr->_private; int new_pos = pcmk__xml_position((xmlNode*) new_attr, pcmk__xf_skip); int old_pos = pcmk__xml_position((xmlNode*) old_attr, pcmk__xf_skip); const char *new_value = pcmk__xe_get(new_xml, name); // This attribute isn't new pcmk__clear_xml_flags(nodepriv, pcmk__xf_created); if (strcmp(new_value, old_value) != 0) { mark_attr_changed(new_xml, (const char *) old_xml->name, name, old_value); } else if ((old_pos != new_pos) && !pcmk__xml_doc_all_flags_set(new_xml->doc, pcmk__xf_ignore_attr_pos |pcmk__xf_tracking)) { /* pcmk__xf_tracking is always set by pcmk__xml_mark_changes() * before this function is called, so only the * pcmk__xf_ignore_attr_pos check is truly relevant. */ mark_attr_moved(new_xml, (const char *) old_xml->name, old_attr, new_attr, old_pos, new_pos); } } } } /*! * \internal * \brief Check all attributes in new XML for creation * * For each of a given XML element's attributes marked as newly created, accept * (and mark as dirty) or reject the creation according to ACLs. * * \param[in,out] new_xml XML to check */ static void mark_created_attrs(xmlNode *new_xml) { xmlAttr *attr_iter = pcmk__xe_first_attr(new_xml); while (attr_iter != NULL) { xmlAttr *new_attr = attr_iter; xml_node_private_t *nodepriv = attr_iter->_private; attr_iter = attr_iter->next; if (pcmk_is_set(nodepriv->flags, pcmk__xf_created)) { const char *attr_name = (const char *) new_attr->name; crm_trace("Created new attribute %s=%s in %s", attr_name, pcmk__xml_attr_value(new_attr), new_xml->name); /* Check ACLs (we can't use the remove-then-create trick because it * would modify the attribute position). */ if (pcmk__check_acl(new_xml, attr_name, pcmk__xf_acl_write)) { pcmk__mark_xml_attr_dirty(new_attr); } else { // Creation was not allowed, so remove the attribute pcmk__xa_remove(new_attr, true); } } } } /*! * \internal * \brief Calculate differences in attributes between two XML nodes * * \param[in,out] old_xml Original XML to compare * \param[in,out] new_xml New XML to compare */ static void xml_diff_attrs(xmlNode *old_xml, xmlNode *new_xml) { // Cleared later if attributes are not really new for (xmlAttr *attr = pcmk__xe_first_attr(new_xml); attr != NULL; attr = attr->next) { xml_node_private_t *nodepriv = attr->_private; pcmk__set_xml_flags(nodepriv, pcmk__xf_created); } xml_diff_old_attrs(old_xml, new_xml); mark_created_attrs(new_xml); } /*! * \internal * \brief Add a deleted object record for an old XML child if ACLs allow * * This is intended to be called for a child of an old XML element that is not * present as a child of a new XML element. * * Add a temporary copy of the old child to the new XML. Then check whether ACLs * would have allowed the deletion of that element. If so, add a deleted object * record for it to the new XML's document, and set the \c pcmk__xf_skip flag on * the old child. 
* * The temporary copy is removed before returning. The new XML and all of its * ancestors will have the \c pcmk__xf_dirty flag set because of the creation, * however. * * \param[in,out] old_child Child of old XML * \param[in,out] new_parent New XML that does not contain \p old_child */ static void mark_child_deleted(xmlNode *old_child, xmlNode *new_parent) { int pos = pcmk__xml_position(old_child, pcmk__xf_skip); // Re-create the child element so we can check ACLs xmlNode *candidate = pcmk__xml_copy(new_parent, old_child); // Clear flags on new child and its children pcmk__xml_tree_foreach(candidate, pcmk__xml_reset_node_flags, NULL); // free_xml_with_position() will check whether ACLs allow the deletion pcmk__apply_acl(xmlDocGetRootElement(candidate->doc)); /* Try to remove the child again (which will track it in document's * deleted_objs on success) */ if (free_xml_with_position(candidate, pos) != pcmk_rc_ok) { // ACLs denied deletion in free_xml_with_position. Free candidate here. pcmk__xml_free_node(candidate); } pcmk__set_xml_flags((xml_node_private_t *) old_child->_private, pcmk__xf_skip); } /*! * \internal * \brief Mark a new child as moved and set \c pcmk__xf_skip as appropriate * * \param[in,out] old_child Child of old XML * \param[in,out] new_child Child of new XML that matches \p old_child * \param[in] old_pos Position of \p old_child among its siblings * \param[in] new_pos Position of \p new_child among its siblings */ static void mark_child_moved(xmlNode *old_child, xmlNode *new_child, int old_pos, int new_pos) { const char *id_s = pcmk__s(pcmk__xe_id(new_child), ""); xmlNode *new_parent = new_child->parent; xml_node_private_t *nodepriv = new_child->_private; crm_trace("Child element %s with " PCMK_XA_ID "='%s' moved from position " "%d to %d under %s", new_child->name, id_s, old_pos, new_pos, new_parent->name); pcmk__mark_xml_node_dirty(new_parent); pcmk__set_xml_flags(nodepriv, pcmk__xf_moved); /* @TODO Figure out and document why we skip the old child in future * position calculations if the old position is higher, and skip the new * child in future position calculations if the new position is higher. This * goes back to 9bb15b38, and there's no explanation in the commit message. */ if (old_pos > new_pos) { nodepriv = old_child->_private; } pcmk__set_xml_flags(nodepriv, pcmk__xf_skip); } /*! * \internal * \brief Check whether a new XML child comment matches an old XML child comment * * Two comments match if they have the same position among their siblings and * the same contents. * * If \p new_comment has the \c pcmk__xf_skip flag set, then it is automatically * considered not to match. * * \param[in] old_comment Old XML child element * \param[in] new_comment New XML child element * * \retval \c true if \p new_comment matches \p old_comment * \retval \c false otherwise */ static bool new_comment_matches(const xmlNode *old_comment, const xmlNode *new_comment) { xml_node_private_t *nodepriv = new_comment->_private; if (pcmk_is_set(nodepriv->flags, pcmk__xf_skip)) { /* @TODO Should we also return false if old_comment has pcmk__xf_skip * set? This preserves existing behavior at time of writing. */ return false; } if (pcmk__xml_position(old_comment, pcmk__xf_skip) != pcmk__xml_position(new_comment, pcmk__xf_skip)) { return false; } return pcmk__xc_matches(old_comment, new_comment); } /*! * \internal * \brief Check whether a new XML child element matches an old XML child element * * Two elements match if they have the same name and, if \p match_ids is * \c true, the same ID. 
(Both IDs can be \c NULL in this case.) * * \param[in] old_element Old XML child element * \param[in] new_element New XML child element * \param[in] match_ids If \c true, require IDs to match (or both to be * \c NULL) * * \retval \c true if \p new_element matches \p old_element * \retval \c false otherwise */ static bool new_element_matches(const xmlNode *old_element, const xmlNode *new_element, bool match_ids) { if (!pcmk__xe_is(new_element, (const char *) old_element->name)) { return false; } return !match_ids || pcmk__str_eq(pcmk__xe_id(old_element), pcmk__xe_id(new_element), pcmk__str_none); } /*! * \internal * \brief Check whether a new XML child node matches an old XML child node * * Node types must be the same in order to match. * * For comments, a match is a comment at the same position with the same * content. * * For elements, a match is an element with the same name and, if required, the * same ID. (Both IDs can be \c NULL in this case.) * * For other node types, there is no match. * * \param[in] old_child Child of old XML * \param[in] new_child Child of new XML * \param[in] match_ids If \c true, require element IDs to match (or both to be * \c NULL) * * \retval \c true if \p new_child matches \p old_child * \retval \c false otherwise */ static bool new_child_matches(const xmlNode *old_child, const xmlNode *new_child, bool match_ids) { if (old_child->type != new_child->type) { return false; } switch (old_child->type) { case XML_COMMENT_NODE: return new_comment_matches(old_child, new_child); case XML_ELEMENT_NODE: return new_element_matches(old_child, new_child, match_ids); default: return false; } } /*! * \internal * \brief Find matching XML node pairs between old and new XML's children * * A node that is part of a matching pair has its _private:match member * set to the matching node. * * \param[in,out] old_xml Old XML * \param[in,out] new_xml New XML * \param[in] comments_ids If \c true, match comments and require element * IDs to match; otherwise, skip comments and match * elements by name only */ static void find_matching_children(xmlNode *old_xml, xmlNode *new_xml, bool comments_ids) { for (xmlNode *old_child = pcmk__xml_first_child(old_xml); old_child != NULL; old_child = pcmk__xml_next(old_child)) { xml_node_private_t *old_nodepriv = old_child->_private; if ((old_nodepriv == NULL) || (old_nodepriv->match != NULL)) { // Can't process, or we already found a match for this old child continue; } if (!comments_ids && (old_child->type != XML_ELEMENT_NODE)) { /* We only match comments and elements, and we're not matching * comments during this call */ continue; } for (xmlNode *new_child = pcmk__xml_first_child(new_xml); new_child != NULL; new_child = pcmk__xml_next(new_child)) { xml_node_private_t *new_nodepriv = new_child->_private; if ((new_nodepriv == NULL) || (new_nodepriv->match != NULL)) { /* Can't process, or this new child already matched some old * child */ continue; } if (new_child_matches(old_child, new_child, comments_ids)) { old_nodepriv->match = new_child; new_nodepriv->match = old_child; break; } } } } /*! * \internal * \brief Mark changes between two XML trees * * Set flags in a new XML tree to indicate changes relative to an old XML tree. * * \param[in,out] old_xml XML before changes * \param[in,out] new_xml XML after changes * * \note This may set \c pcmk__xf_skip on parts of \p old_xml. 
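 *
 * A minimal usage sketch follows (the element name "resource" and the
 * attribute value are hypothetical, and error handling is omitted; the
 * deprecated xml_calculate_changes() wrapper further below adds sanity
 * checks and ensures tracking starts clean before calling this function):
 * \code
 * xmlNode *old_xml = pcmk__xe_create(NULL, "resource");
 * xmlNode *new_xml = pcmk__xe_create(NULL, "resource");
 *
 * // Make the trees differ by one attribute
 * crm_xml_add(new_xml, PCMK_XA_ID, "rsc1");
 *
 * // Enables pcmk__xf_tracking on new_xml->doc, then flags created, changed,
 * // moved, and deleted nodes in new_xml relative to old_xml
 * pcmk__xml_mark_changes(old_xml, new_xml);
 *
 * pcmk__xml_free(old_xml);
 * pcmk__xml_free(new_xml);
 * \endcode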
*/ void pcmk__xml_mark_changes(xmlNode *old_xml, xmlNode *new_xml) { /* This function may set the xml_node_private_t:match member on children of * old_xml and new_xml, but it clears that member before returning. * * @TODO Ensure we handle (for example, by copying) or reject user-created * XML that is missing xml_node_private_t at top level or in any children. * Similarly, check handling of node types for which we don't create private * data. For now, we'll skip them in the loops below. */ CRM_CHECK((old_xml != NULL) && (new_xml != NULL), return); if ((old_xml->_private == NULL) || (new_xml->_private == NULL)) { return; } pcmk__xml_doc_set_flags(new_xml->doc, pcmk__xf_tracking); xml_diff_attrs(old_xml, new_xml); find_matching_children(old_xml, new_xml, true); find_matching_children(old_xml, new_xml, false); // Process matches (changed children) and deletions for (xmlNode *old_child = pcmk__xml_first_child(old_xml); old_child != NULL; old_child = pcmk__xml_next(old_child)) { xml_node_private_t *nodepriv = old_child->_private; xmlNode *new_child = NULL; if (nodepriv == NULL) { continue; } if (nodepriv->match == NULL) { // No match in new XML means the old child was deleted mark_child_deleted(old_child, new_xml); continue; } /* Fetch the match and clear old_child->_private's match member. * new_child->_private's match member is handled in the new_xml loop. */ new_child = nodepriv->match; nodepriv->match = NULL; pcmk__assert(old_child->type == new_child->type); if (old_child->type == XML_COMMENT_NODE) { // Comments match only if their positions and contents match continue; } pcmk__xml_mark_changes(old_child, new_child); } /* Mark unmatched new children as created, and mark matched new children as * moved if their positions changed. Grab the next new child in advance, * since new_child may get freed in the loop body. */ for (xmlNode *new_child = pcmk__xml_first_child(new_xml), *next = pcmk__xml_next(new_child); new_child != NULL; new_child = next, next = pcmk__xml_next(new_child)) { xml_node_private_t *nodepriv = new_child->_private; if (nodepriv == NULL) { continue; } if (nodepriv->match != NULL) { /* Fetch the match and clear new_child->_private's match member. Any * changes were marked in the old_xml loop. Mark the move. * * We might be able to mark the move earlier, when we mark changes * for matches in the old_xml loop, consolidating both actions. We'd * have to think about whether the timing of setting the * pcmk__xf_skip flag makes any difference. */ xmlNode *old_child = nodepriv->match; int old_pos = pcmk__xml_position(old_child, pcmk__xf_skip); int new_pos = pcmk__xml_position(new_child, pcmk__xf_skip); if (old_pos != new_pos) { mark_child_moved(old_child, new_child, old_pos, new_pos); } nodepriv->match = NULL; continue; } // No match in old XML means the new child is newly created pcmk__set_xml_flags(nodepriv, pcmk__xf_skip); mark_xml_tree_dirty_created(new_child); // Check whether creation was allowed (may free new_child) pcmk__apply_creation_acl(new_child, true); } } /*! * \internal * \brief Initialize the Pacemaker XML environment * * Set an XML buffer allocation scheme, set XML node create and destroy * callbacks, and load schemas into the cache. */ void pcmk__xml_init(void) { // @TODO Try to find a better caller than crm_log_preinit() static bool initialized = false; if (!initialized) { initialized = true; /* Double the buffer size when the buffer needs to grow. The default * allocator XML_BUFFER_ALLOC_EXACT was found to cause poor performance * due to the number of reallocs. 
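 *
 * Doubling gives amortized constant cost per appended byte, whereas
 * growing each buffer to the exact size needed can degrade to quadratic
 * behavior across many small appends.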
*/ xmlSetBufferAllocationScheme(XML_BUFFER_ALLOC_DOUBLEIT); // Load schemas into the cache pcmk__schema_init(); } } /*! * \internal * \brief Tear down the Pacemaker XML environment * * Destroy schema cache and clean up memory allocated by libxml2. */ void pcmk__xml_cleanup(void) { pcmk__schema_cleanup(); xmlCleanupParser(); } char * pcmk__xml_artefact_root(enum pcmk__xml_artefact_ns ns) { static const char *base = NULL; char *ret = NULL; if (base == NULL) { base = pcmk__env_option(PCMK__ENV_SCHEMA_DIRECTORY); } if (pcmk__str_empty(base)) { base = PCMK_SCHEMA_DIR; } switch (ns) { case pcmk__xml_artefact_ns_legacy_rng: case pcmk__xml_artefact_ns_legacy_xslt: ret = strdup(base); break; case pcmk__xml_artefact_ns_base_rng: case pcmk__xml_artefact_ns_base_xslt: ret = crm_strdup_printf("%s/base", base); break; default: crm_err("XML artefact family specified as %u not recognized", ns); } return ret; } static char * find_artefact(enum pcmk__xml_artefact_ns ns, const char *path, const char *filespec) { char *ret = NULL; switch (ns) { case pcmk__xml_artefact_ns_legacy_rng: case pcmk__xml_artefact_ns_base_rng: if (pcmk__ends_with(filespec, ".rng")) { ret = crm_strdup_printf("%s/%s", path, filespec); } else { ret = crm_strdup_printf("%s/%s.rng", path, filespec); } break; case pcmk__xml_artefact_ns_legacy_xslt: case pcmk__xml_artefact_ns_base_xslt: if (pcmk__ends_with(filespec, ".xsl")) { ret = crm_strdup_printf("%s/%s", path, filespec); } else { ret = crm_strdup_printf("%s/%s.xsl", path, filespec); } break; default: crm_err("XML artefact family specified as %u not recognized", ns); } return ret; } char * pcmk__xml_artefact_path(enum pcmk__xml_artefact_ns ns, const char *filespec) { struct stat sb; char *base = pcmk__xml_artefact_root(ns); char *ret = NULL; ret = find_artefact(ns, base, filespec); free(base); if (stat(ret, &sb) != 0 || !S_ISREG(sb.st_mode)) { const char *remote_schema_dir = pcmk__remote_schema_dir(); free(ret); ret = find_artefact(ns, remote_schema_dir, filespec); } return ret; } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include xmlNode * copy_xml(xmlNode *src) { xmlDoc *doc = pcmk__xml_new_doc(); xmlNode *copy = NULL; copy = xmlDocCopyNode(src, doc, 1); pcmk__mem_assert(copy); xmlDocSetRootElement(doc, copy); pcmk__xml_new_private_data(copy); return copy; } void crm_xml_init(void) { pcmk__xml_init(); } void crm_xml_cleanup(void) { pcmk__xml_cleanup(); } void pcmk_free_xml_subtree(xmlNode *xml) { pcmk__xml_free_node(xml); } void free_xml(xmlNode *child) { pcmk__xml_free(child); } void crm_xml_sanitize_id(char *id) { char *c; for (c = id; *c; ++c) { switch (*c) { case ':': case '#': *c = '.'; } } } bool xml_tracking_changes(xmlNode *xml) { return (xml != NULL) && pcmk__xml_doc_all_flags_set(xml->doc, pcmk__xf_tracking); } bool xml_document_dirty(xmlNode *xml) { return (xml != NULL) && pcmk__xml_doc_all_flags_set(xml->doc, pcmk__xf_dirty); } void xml_accept_changes(xmlNode *xml) { if (xml != NULL) { pcmk__xml_commit_changes(xml->doc); } } void xml_track_changes(xmlNode *xml, const char *user, xmlNode *acl_source, bool enforce_acls) { if (xml == NULL) { return; } pcmk__xml_commit_changes(xml->doc); crm_trace("Tracking changes%s to %p", (enforce_acls? 
" with ACLs" : ""), xml); pcmk__xml_doc_set_flags(xml->doc, pcmk__xf_tracking); if (enforce_acls) { if (acl_source == NULL) { acl_source = xml; } pcmk__xml_doc_set_flags(xml->doc, pcmk__xf_acl_enabled); pcmk__unpack_acl(acl_source, xml, user); pcmk__apply_acl(xml); } } void xml_calculate_changes(xmlNode *old_xml, xmlNode *new_xml) { CRM_CHECK((old_xml != NULL) && (new_xml != NULL) && pcmk__xe_is(old_xml, (const char *) new_xml->name) && pcmk__str_eq(pcmk__xe_id(old_xml), pcmk__xe_id(new_xml), pcmk__str_none), return); if (!pcmk__xml_doc_all_flags_set(new_xml->doc, pcmk__xf_tracking)) { // Ensure tracking has a clean start (pcmk__xml_mark_changes() enables) pcmk__xml_commit_changes(new_xml->doc); } pcmk__xml_mark_changes(old_xml, new_xml); } void xml_calculate_significant_changes(xmlNode *old_xml, xmlNode *new_xml) { CRM_CHECK((old_xml != NULL) && (new_xml != NULL) && pcmk__xe_is(old_xml, (const char *) new_xml->name) && pcmk__str_eq(pcmk__xe_id(old_xml), pcmk__xe_id(new_xml), pcmk__str_none), return); /* BUG: If pcmk__xf_tracking is not set for new_xml when this function is * called, then we unset pcmk__xf_ignore_attr_pos via * pcmk__xml_commit_changes(). Since this function is about to be * deprecated, it's not worth fixing this and changing the user-facing * behavior. */ pcmk__xml_doc_set_flags(new_xml->doc, pcmk__xf_ignore_attr_pos); if (!pcmk__xml_doc_all_flags_set(new_xml->doc, pcmk__xf_tracking)) { // Ensure tracking has a clean start (pcmk__xml_mark_changes() enables) pcmk__xml_commit_changes(new_xml->doc); } pcmk__xml_mark_changes(old_xml, new_xml); } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/fencing/st_actions.c b/lib/fencing/st_actions.c index 378f94e4a9..b60926fe03 100644 --- a/lib/fencing/st_actions.c +++ b/lib/fencing/st_actions.c @@ -1,730 +1,730 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include // xmlNode #include #include #include #include #include #include "fencing_private.h" struct stonith_action_s { /*! user defined data */ char *agent; char *action; GHashTable *args; int timeout; bool async; void *userdata; void (*done_cb) (int pid, const pcmk__action_result_t *result, void *user_data); void (*fork_cb) (int pid, void *user_data); svc_action_t *svc_action; /*! internal timing information */ time_t initial_start_time; int tries; int remaining_timeout; int max_retries; int pid; pcmk__action_result_t result; }; static int internal_stonith_action_execute(stonith_action_t *action); static void log_action(stonith_action_t *action, pid_t pid); /*! * \internal * \brief Set an action's result based on services library result * * \param[in,out] action Fence action to set result for * \param[in,out] svc_action Service action to get result from */ static void set_result_from_svc_action(stonith_action_t *action, svc_action_t *svc_action) { services__copy_result(svc_action, &(action->result)); pcmk__set_result_output(&(action->result), services__grab_stdout(svc_action), services__grab_stderr(svc_action)); } static void log_action(stonith_action_t *action, pid_t pid) { /* The services library has already logged the output at info or debug * level, so just raise to warning for stderr. 
*/ if (action->result.action_stderr != NULL) { /* Logging the whole string confuses syslog when the string is xml */ char *prefix = crm_strdup_printf("%s[%d] stderr:", action->agent, pid); crm_log_output(LOG_WARNING, prefix, action->result.action_stderr); free(prefix); } } static void append_config_arg(gpointer key, gpointer value, gpointer user_data) { /* Filter out parameters handled directly by Pacemaker. * * STONITH_ATTR_ACTION_OP is added elsewhere and should never be part of the * fencing resource's parameter list. We should ignore its value if it is * configured there. */ if (!pcmk__str_eq(key, STONITH_ATTR_ACTION_OP, pcmk__str_casei) && !pcmk_stonith_param(key) && (strstr(key, CRM_META) == NULL) && !pcmk__str_eq(key, PCMK_XA_CRM_FEATURE_SET, pcmk__str_none)) { crm_trace("Passing %s=%s with fence action", (const char *) key, (const char *) (value? value : "")); pcmk__insert_dup((GHashTable *) user_data, key, pcmk__s(value, "")); } } /*! * \internal * \brief Create a table of arguments for a fencing action * * \param[in] agent Fencing agent name * \param[in] action Name of fencing action * \param[in] target Name of target node for fencing action * \param[in] target_nodeid Node ID of target node for fencing action * \param[in] device_args Fence device parameters * \param[in] port_map Target node-to-port mapping for fence device * \param[in] host_arg Argument name for passing target * * \return Newly created hash table of arguments for fencing action */ static GHashTable * make_args(const char *agent, const char *action, const char *target, uint32_t target_nodeid, GHashTable *device_args, GHashTable *port_map, const char *host_arg) { GHashTable *arg_list = NULL; const char *value = NULL; CRM_CHECK(action != NULL, return NULL); arg_list = pcmk__strkey_table(free, free); // Add action to arguments (using an alias if requested) if (device_args) { char buffer[512]; snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action); value = g_hash_table_lookup(device_args, buffer); if (value) { crm_debug("Substituting '%s' for fence action %s targeting %s", value, action, pcmk__s(target, "no node")); action = value; } } // Tell the fence agent what action to perform pcmk__insert_dup(arg_list, STONITH_ATTR_ACTION_OP, action); /* If this is a fencing operation against another node, add more standard * arguments. 
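 *
 * As a rough illustration (the device configuration, target name, and port
 * mapping here are hypothetical): fencing node1 with the "off" action, where
 * the device's host argument is "port" (left unset or "dynamic" in the device
 * parameters) and port_map maps node1 to "plug3", would typically yield an
 * argument table along the lines of action=off, nodename=node1,
 * nodeid=<cluster node ID>, port=plug3, plus whatever other device
 * parameters append_config_arg() passes through.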
*/ if ((target != NULL) && (device_args != NULL)) { const char *param = NULL; /* Always pass the target's name, per * https://github.com/ClusterLabs/fence-agents/blob/main/doc/FenceAgentAPI.md */ pcmk__insert_dup(arg_list, "nodename", target); // If the target's node ID was specified, pass it, too if (target_nodeid != 0) { char *nodeid = crm_strdup_printf("%" PRIu32, target_nodeid); // cts-fencing looks for this log message crm_info("Passing '%s' as nodeid with fence action '%s' targeting %s", nodeid, action, pcmk__s(target, "no node")); g_hash_table_insert(arg_list, strdup("nodeid"), nodeid); } // Check whether target should be specified as some other argument param = g_hash_table_lookup(device_args, PCMK_STONITH_HOST_ARGUMENT); if (param == NULL) { // Use caller's default (likely from agent metadata) param = host_arg; } if ((param != NULL) && !pcmk__str_eq(agent, "fence_legacy", pcmk__str_none) && !pcmk__str_eq(param, PCMK_VALUE_NONE, pcmk__str_casei)) { value = g_hash_table_lookup(device_args, param); if (pcmk__str_eq(value, "dynamic", pcmk__str_casei|pcmk__str_null_matches)) { /* If the host argument is "dynamic" or not configured, * reset it to the target */ const char *alias = NULL; if (port_map) { alias = g_hash_table_lookup(port_map, target); } if (alias == NULL) { alias = target; } crm_debug("Passing %s='%s' with fence action %s targeting %s", param, alias, action, pcmk__s(target, "no node")); pcmk__insert_dup(arg_list, param, alias); } } } if (device_args) { g_hash_table_foreach(device_args, append_config_arg, arg_list); } return arg_list; } /*! * \internal * \brief Free all memory used by a stonith action * * \param[in,out] action Action to free */ void stonith__destroy_action(stonith_action_t *action) { if (action) { free(action->agent); if (action->args) { g_hash_table_destroy(action->args); } free(action->action); if (action->svc_action) { services_action_free(action->svc_action); } pcmk__reset_result(&(action->result)); free(action); } } /*! * \internal * \brief Get the result of an executed stonith action * * \param[in] action Executed action * * \return Pointer to action's result (or NULL if \p action is NULL) */ pcmk__action_result_t * stonith__action_result(stonith_action_t *action) { return (action == NULL)? NULL : &(action->result); } #define FAILURE_MAX_RETRIES 2 /*! 
* \internal * \brief Create a new fencing action to be executed * * \param[in] agent Fence agent to use * \param[in] action_name Fencing action to be executed * \param[in] target Name of target of fencing action (if known) * \param[in] target_nodeid Node ID of target of fencing action (if known) * \param[in] timeout_sec Timeout to be used when executing action * \param[in] device_args Parameters to pass to fence agent * \param[in] port_map Mapping of target names to device ports * \param[in] host_arg Agent parameter used to pass target name * * \return Newly created fencing action (asserts on error, never NULL) */ stonith_action_t * stonith__action_create(const char *agent, const char *action_name, const char *target, uint32_t target_nodeid, int timeout_sec, GHashTable *device_args, GHashTable *port_map, const char *host_arg) { stonith_action_t *action = pcmk__assert_alloc(1, sizeof(stonith_action_t)); action->args = make_args(agent, action_name, target, target_nodeid, device_args, port_map, host_arg); crm_debug("Preparing '%s' action targeting %s using agent %s", action_name, pcmk__s(target, "no node"), agent); action->agent = strdup(agent); action->action = strdup(action_name); action->timeout = action->remaining_timeout = timeout_sec; action->max_retries = FAILURE_MAX_RETRIES; pcmk__set_result(&(action->result), PCMK_OCF_UNKNOWN, PCMK_EXEC_UNKNOWN, "Initialization bug in fencing library"); if (device_args) { char buffer[512]; const char *value = NULL; snprintf(buffer, sizeof(buffer), "pcmk_%s_retries", action_name); value = g_hash_table_lookup(device_args, buffer); if (value) { action->max_retries = atoi(value); } } return action; } static gboolean update_remaining_timeout(stonith_action_t * action) { int diff = time(NULL) - action->initial_start_time; if (action->tries >= action->max_retries) { crm_info("Attempted to execute agent %s (%s) the maximum number of times (%d) allowed", action->agent, action->action, action->max_retries); action->remaining_timeout = 0; } else if ((action->result.execution_status != PCMK_EXEC_TIMEOUT) && (diff < (action->timeout * 0.7))) { /* only set remaining timeout period if there is 30% * or greater of the original timeout period left */ action->remaining_timeout = action->timeout - diff; } else { action->remaining_timeout = 0; } return action->remaining_timeout ? TRUE : FALSE; } /*! * \internal * \brief Map a fencing action result to a standard return code * * \param[in] result Fencing action result to map * * \return Standard Pacemaker return code that best corresponds to \p result */ int stonith__result2rc(const pcmk__action_result_t *result) { if (pcmk__result_ok(result)) { return pcmk_rc_ok; } switch (result->execution_status) { case PCMK_EXEC_PENDING: return EINPROGRESS; case PCMK_EXEC_CANCELLED: return ECANCELED; case PCMK_EXEC_TIMEOUT: return ETIME; case PCMK_EXEC_NOT_INSTALLED: return ENOENT; case PCMK_EXEC_NOT_SUPPORTED: return EOPNOTSUPP; case PCMK_EXEC_NOT_CONNECTED: return ENOTCONN; case PCMK_EXEC_NO_FENCE_DEVICE: return ENODEV; case PCMK_EXEC_NO_SECRETS: return EACCES; /* For the fencing API, PCMK_EXEC_INVALID is used with fencer API * operations that don't involve executing an agent (for example, * registering devices). This allows us to use the CRM_EX_* codes in the * exit status for finer-grained responses. 
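 *
 * For example, a registration request the fencer rejects with a
 * CRM_EX_INVALID_PARAM exit status maps to EINVAL below, and one rejected
 * with CRM_EX_INSUFFICIENT_PRIV maps to EACCES.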
*/ case PCMK_EXEC_INVALID: switch (result->exit_status) { case CRM_EX_INVALID_PARAM: return EINVAL; case CRM_EX_INSUFFICIENT_PRIV: return EACCES; case CRM_EX_PROTOCOL: return EPROTO; /* CRM_EX_EXPIRED is used for orphaned fencing operations left * over from a previous instance of the fencer. For API backward * compatibility, this is mapped to the previously used code for * this case, EHOSTUNREACH. */ case CRM_EX_EXPIRED: return EHOSTUNREACH; default: break; } break; default: break; } // Try to provide useful error code based on result's error output if (result->action_stderr == NULL) { return ENODATA; } else if (strcasestr(result->action_stderr, "timed out") || strcasestr(result->action_stderr, "timeout")) { return ETIME; } else if (strcasestr(result->action_stderr, "unrecognised action") || strcasestr(result->action_stderr, "unrecognized action") || strcasestr(result->action_stderr, "unsupported action")) { return EOPNOTSUPP; } // Oh well, we tried return pcmk_rc_error; } /*! * \internal * \brief Determine execution status equivalent of legacy fencer return code * * Fence action notifications, and fence action callbacks from older fencers * (<=2.1.2) in a rolling upgrade, will have only a legacy return code. Map this * to an execution status as best as possible (essentially, the inverse of * stonith__result2rc()). * * \param[in] rc Legacy return code from fencer * * \return Execution status best corresponding to \p rc */ int stonith__legacy2status(int rc) { if (rc >= 0) { return PCMK_EXEC_DONE; } switch (-rc) { case EACCES: return PCMK_EXEC_NO_SECRETS; case ECANCELED: return PCMK_EXEC_CANCELLED; case EHOSTUNREACH: return PCMK_EXEC_INVALID; case EINPROGRESS: return PCMK_EXEC_PENDING; case ENODEV: return PCMK_EXEC_NO_FENCE_DEVICE; case ENOENT: return PCMK_EXEC_NOT_INSTALLED; case ENOTCONN: return PCMK_EXEC_NOT_CONNECTED; case EOPNOTSUPP: return PCMK_EXEC_NOT_SUPPORTED; case EPROTO: return PCMK_EXEC_INVALID; case EPROTONOSUPPORT: return PCMK_EXEC_NOT_SUPPORTED; case ETIME: return PCMK_EXEC_TIMEOUT; case ETIMEDOUT: return PCMK_EXEC_TIMEOUT; default: return PCMK_EXEC_ERROR; } } /*! * \internal * \brief Add a fencing result to an XML element as attributes * * \param[in,out] xml XML element to add result to * \param[in] result Fencing result to add (assume success if NULL) */ void stonith__xe_set_result(xmlNode *xml, const pcmk__action_result_t *result) { int exit_status = CRM_EX_OK; enum pcmk_exec_status execution_status = PCMK_EXEC_DONE; const char *exit_reason = NULL; const char *action_stdout = NULL; int rc = pcmk_ok; CRM_CHECK(xml != NULL, return); if (result != NULL) { exit_status = result->exit_status; execution_status = result->execution_status; exit_reason = result->exit_reason; action_stdout = result->action_stdout; rc = pcmk_rc2legacy(stonith__result2rc(result)); } crm_xml_add_int(xml, PCMK__XA_OP_STATUS, (int) execution_status); crm_xml_add_int(xml, PCMK__XA_RC_CODE, exit_status); crm_xml_add(xml, PCMK_XA_EXIT_REASON, exit_reason); crm_xml_add(xml, PCMK__XA_ST_OUTPUT, action_stdout); /* @COMPAT Peers in rolling upgrades, Pacemaker Remote nodes, and external * code that use libstonithd <=2.1.2 don't check for the full result, and * need a legacy return code instead. */ crm_xml_add_int(xml, PCMK__XA_ST_RC, rc); } /*! 
* \internal * \brief Find a fencing result beneath an XML element * * \param[in] xml XML element to search * * \return \p xml or descendant of it that contains a fencing result, else NULL */ xmlNode * stonith__find_xe_with_result(xmlNode *xml) { xmlNode *match = pcmk__xpath_find_one(xml->doc, "//*[@" PCMK__XA_RC_CODE "]", LOG_NEVER); if (match == NULL) { /* @COMPAT Peers <=2.1.2 in a rolling upgrade provide only a legacy * return code, not a full result, so check for that. */ match = pcmk__xpath_find_one(xml->doc, "//*[@" PCMK__XA_ST_RC "]", LOG_ERR); } return match; } /*! * \internal * \brief Get a fencing result from an XML element's attributes * * \param[in] xml XML element with fencing result * \param[out] result Where to store fencing result */ void stonith__xe_get_result(const xmlNode *xml, pcmk__action_result_t *result) { int exit_status = CRM_EX_OK; int execution_status = PCMK_EXEC_DONE; const char *exit_reason = NULL; char *action_stdout = NULL; CRM_CHECK((xml != NULL) && (result != NULL), return); exit_reason = pcmk__xe_get(xml, PCMK_XA_EXIT_REASON); - action_stdout = crm_element_value_copy(xml, PCMK__XA_ST_OUTPUT); + action_stdout = pcmk__xe_get_copy(xml, PCMK__XA_ST_OUTPUT); // A result must include an exit status and execution status if ((pcmk__xe_get_int(xml, PCMK__XA_RC_CODE, &exit_status) != pcmk_rc_ok) || (pcmk__xe_get_int(xml, PCMK__XA_OP_STATUS, &execution_status) != pcmk_rc_ok)) { int rc = pcmk_ok; exit_status = CRM_EX_ERROR; /* @COMPAT Peers <=2.1.2 in rolling upgrades provide only a legacy * return code, not a full result, so check for that. */ if (pcmk__xe_get_int(xml, PCMK__XA_ST_RC, &rc) == pcmk_rc_ok) { if ((rc == pcmk_ok) || (rc == -EINPROGRESS)) { exit_status = CRM_EX_OK; } execution_status = stonith__legacy2status(rc); exit_reason = pcmk_strerror(rc); } else { execution_status = PCMK_EXEC_ERROR; exit_reason = "Fencer reply contained neither a full result " "nor a legacy return code (bug?)"; } } pcmk__set_result(result, exit_status, execution_status, exit_reason); pcmk__set_result_output(result, action_stdout, NULL); } static void stonith_action_async_done(svc_action_t *svc_action) { stonith_action_t *action = (stonith_action_t *) svc_action->cb_data; set_result_from_svc_action(action, svc_action); svc_action->params = NULL; log_action(action, action->pid); if (!pcmk__result_ok(&(action->result)) && update_remaining_timeout(action)) { int rc = internal_stonith_action_execute(action); if (rc == pcmk_ok) { return; } } if (action->done_cb) { action->done_cb(action->pid, &(action->result), action->userdata); } action->svc_action = NULL; // don't remove our caller stonith__destroy_action(action); } static void stonith_action_async_forked(svc_action_t *svc_action) { stonith_action_t *action = (stonith_action_t *) svc_action->cb_data; action->pid = svc_action->pid; action->svc_action = svc_action; if (action->fork_cb) { (action->fork_cb) (svc_action->pid, action->userdata); } pcmk__set_result(&(action->result), PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL); crm_trace("Child process %d performing action '%s' successfully forked", action->pid, action->action); } /*! 
* \internal * \brief Convert a fencing library action to a services library action * * \param[in,out] action Fencing library action to convert * * \return Services library action equivalent to \p action on success; on error, * NULL will be returned and \p action's result will be set */ static svc_action_t * stonith_action_to_svc(stonith_action_t *action) { static int stonith_sequence = 0; char *path = crm_strdup_printf(PCMK__FENCE_BINDIR "/%s", action->agent); svc_action_t *svc_action = services_action_create_generic(path, NULL); free(path); if (svc_action->rc != PCMK_OCF_UNKNOWN) { set_result_from_svc_action(action, svc_action); services_action_free(svc_action); return NULL; } svc_action->timeout = action->remaining_timeout * 1000; svc_action->standard = pcmk__str_copy(PCMK_RESOURCE_CLASS_STONITH); svc_action->id = crm_strdup_printf("%s_%s_%dof%d", action->agent, action->action, action->tries, action->max_retries); svc_action->agent = pcmk__str_copy(action->agent); svc_action->sequence = stonith_sequence++; svc_action->params = action->args; svc_action->cb_data = (void *) action; svc_action->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Action", svc_action->id, svc_action->flags, SVC_ACTION_NON_BLOCKED, "SVC_ACTION_NON_BLOCKED"); return svc_action; } static int internal_stonith_action_execute(stonith_action_t * action) { int rc = pcmk_ok; int is_retry = 0; svc_action_t *svc_action = NULL; CRM_CHECK(action != NULL, return -EINVAL); if ((action->action == NULL) || (action->args == NULL) || (action->agent == NULL)) { pcmk__set_result(&(action->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR_FATAL, "Bug in fencing library"); return -EINVAL; } if (action->tries++ == 0) { // First attempt of the desired action action->initial_start_time = time(NULL); } else { // Later attempt after earlier failure crm_info("Attempt %d to execute '%s' action of agent %s " "(%ds timeout remaining)", action->tries, action->action, action->agent, action->remaining_timeout); is_retry = 1; } svc_action = stonith_action_to_svc(action); if (svc_action == NULL) { // The only possible errors are out-of-memory and too many arguments return -E2BIG; } /* keep retries from executing out of control and free previous results */ if (is_retry) { pcmk__reset_result(&(action->result)); // @TODO This should be nonblocking via timer if mainloop is used sleep(1); } if (action->async) { // We never create a recurring action, so this should always return TRUE CRM_LOG_ASSERT(services_action_async_fork_notify(svc_action, &stonith_action_async_done, &stonith_action_async_forked)); return pcmk_ok; } else if (!services_action_sync(svc_action)) { rc = -ECONNABORTED; // @TODO Update API to return more useful error } set_result_from_svc_action(action, svc_action); svc_action->params = NULL; services_action_free(svc_action); return rc; } /*! 
* \internal * \brief Kick off execution of an async stonith action * * \param[in,out] action Action to be executed * \param[in,out] userdata Datapointer to be passed to callbacks * \param[in] done Callback to notify action has failed/succeeded * \param[in] fork_callback Callback to notify successful fork of child * * \return pcmk_ok if ownership of action has been taken, -errno otherwise */ int stonith__execute_async(stonith_action_t * action, void *userdata, void (*done) (int pid, const pcmk__action_result_t *result, void *user_data), void (*fork_cb) (int pid, void *user_data)) { if (!action) { return -EINVAL; } action->userdata = userdata; action->done_cb = done; action->fork_cb = fork_cb; action->async = true; return internal_stonith_action_execute(action); } /*! * \internal * \brief Execute a stonith action * * \param[in,out] action Action to execute * * \return pcmk_ok on success, -errno otherwise */ int stonith__execute(stonith_action_t *action) { int rc = pcmk_ok; CRM_CHECK(action != NULL, return -EINVAL); // Keep trying until success, max retries, or timeout do { rc = internal_stonith_action_execute(action); } while ((rc != pcmk_ok) && update_remaining_timeout(action)); return rc; } diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c index 1700bb93f4..eec165b8ad 100644 --- a/lib/fencing/st_client.c +++ b/lib/fencing/st_client.c @@ -1,2735 +1,2733 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include // xmlNode #include // xmlXPathObject, etc. 
#include #include #include #include #include #include "fencing_private.h" CRM_TRACE_INIT_DATA(stonith); // Used as stonith_t:st_private typedef struct stonith_private_s { char *token; crm_ipc_t *ipc; mainloop_io_t *source; GHashTable *stonith_op_callback_table; GList *notify_list; int notify_refcnt; bool notify_deletes; void (*op_callback) (stonith_t * st, stonith_callback_data_t * data); } stonith_private_t; // Used as stonith_event_t:opaque struct event_private { pcmk__action_result_t result; }; typedef struct stonith_notify_client_s { const char *event; const char *obj_id; /* implement one day */ const char *obj_type; /* implement one day */ void (*notify) (stonith_t * st, stonith_event_t * e); bool delete; } stonith_notify_client_t; typedef struct stonith_callback_client_s { void (*callback) (stonith_t * st, stonith_callback_data_t * data); const char *id; void *user_data; gboolean only_success; gboolean allow_timeout_updates; struct timer_rec_s *timer; } stonith_callback_client_t; struct notify_blob_s { stonith_t *stonith; xmlNode *xml; }; struct timer_rec_s { int call_id; int timeout; guint ref; stonith_t *stonith; }; typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *, xmlNode *, xmlNode *, xmlNode **, xmlNode **); bool stonith_dispatch(stonith_t * st); xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); static int stonith_send_command(stonith_t *stonith, const char *op, xmlNode *data, xmlNode **output_data, int call_options, int timeout); static void stonith_connection_destroy(gpointer user_data); static void stonith_send_notification(gpointer data, gpointer user_data); static int stonith_api_del_notification(stonith_t *stonith, const char *event); /*! * \brief Get agent namespace by name * * \param[in] namespace_s Name of namespace as string * * \return Namespace as enum value */ enum stonith_namespace stonith_text2namespace(const char *namespace_s) { if (pcmk__str_eq(namespace_s, "any", pcmk__str_null_matches)) { return st_namespace_any; } else if (!strcmp(namespace_s, "redhat") || !strcmp(namespace_s, "stonith-ng")) { return st_namespace_rhcs; } else if (!strcmp(namespace_s, "internal")) { return st_namespace_internal; } else if (!strcmp(namespace_s, "heartbeat")) { return st_namespace_lha; } return st_namespace_invalid; } /*! * \brief Get agent namespace name * * \param[in] namespace Namespace as enum value * * \return Namespace name as string */ const char * stonith_namespace2text(enum stonith_namespace st_namespace) { switch (st_namespace) { case st_namespace_any: return "any"; case st_namespace_rhcs: return "stonith-ng"; case st_namespace_internal: return "internal"; case st_namespace_lha: return "heartbeat"; default: break; } return "unsupported"; } /*! 
* \brief Determine namespace of a fence agent * * \param[in] agent Fence agent type * \param[in] namespace_s Name of agent namespace as string, if known * * \return Namespace of specified agent, as enum value */ enum stonith_namespace stonith_get_namespace(const char *agent, const char *namespace_s) { if (pcmk__str_eq(namespace_s, "internal", pcmk__str_none)) { return st_namespace_internal; } if (stonith__agent_is_rhcs(agent)) { return st_namespace_rhcs; } #if HAVE_STONITH_STONITH_H if (stonith__agent_is_lha(agent)) { return st_namespace_lha; } #endif return st_namespace_invalid; } gboolean stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node) { gboolean rv = FALSE; stonith_t *stonith_api = st?st:stonith_api_new(); char *list = NULL; if(stonith_api) { if (stonith_api->state == stonith_disconnected) { int rc = stonith_api->cmds->connect(stonith_api, "stonith-api", NULL); if (rc != pcmk_ok) { crm_err("Failed connecting to Stonith-API for watchdog-fencing-query."); } } if (stonith_api->state != stonith_disconnected) { /* caveat!!! * this might fail when when stonithd is just updating the device-list * probably something we should fix as well for other api-calls */ int rc = stonith_api->cmds->list(stonith_api, st_opt_sync_call, STONITH_WATCHDOG_ID, &list, 0); if ((rc != pcmk_ok) || (list == NULL)) { /* due to the race described above it can happen that * we drop in here - so as not to make remote nodes * panic on that answer */ if (rc == -ENODEV) { crm_notice("Cluster does not have watchdog fencing device"); } else { crm_warn("Could not check for watchdog fencing device: %s", pcmk_strerror(rc)); } } else if (list[0] == '\0') { rv = TRUE; } else { GList *targets = stonith__parse_targets(list); rv = pcmk__str_in_list(node, targets, pcmk__str_casei); g_list_free_full(targets, free); } free(list); if (!st) { /* if we're provided the api we still might have done the * connection - but let's assume the caller won't bother */ stonith_api->cmds->disconnect(stonith_api); } } if (!st) { stonith_api_delete(stonith_api); } } else { crm_err("Stonith-API for watchdog-fencing-query couldn't be created."); } crm_trace("Pacemaker assumes node %s %sto do watchdog-fencing.", node, rv?"":"not "); return rv; } gboolean stonith__watchdog_fencing_enabled_for_node(const char *node) { return stonith__watchdog_fencing_enabled_for_node_api(NULL, node); } /* when cycling through the list we don't want to delete items so just mark them and when we know nobody is using the list loop over it to remove the marked items */ static void foreach_notify_entry (stonith_private_t *private, GFunc func, gpointer user_data) { private->notify_refcnt++; g_list_foreach(private->notify_list, func, user_data); private->notify_refcnt--; if ((private->notify_refcnt == 0) && private->notify_deletes) { GList *list_item = private->notify_list; private->notify_deletes = FALSE; while (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; GList *next = g_list_next(list_item); if (list_client->delete) { free(list_client); private->notify_list = g_list_delete_link(private->notify_list, list_item); } list_item = next; } } } static void stonith_connection_destroy(gpointer user_data) { stonith_t *stonith = user_data; stonith_private_t *native = NULL; struct notify_blob_s blob; crm_trace("Sending destroyed notification"); blob.stonith = stonith; blob.xml = pcmk__xe_create(NULL, PCMK__XE_NOTIFY); native = stonith->st_private; native->ipc = NULL; native->source = NULL; free(native->token); native->token = 
NULL; stonith->state = stonith_disconnected; crm_xml_add(blob.xml, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); crm_xml_add(blob.xml, PCMK__XA_SUBT, PCMK__VALUE_ST_NOTIFY_DISCONNECT); foreach_notify_entry(native, stonith_send_notification, &blob); pcmk__xml_free(blob.xml); } xmlNode * create_device_registration_xml(const char *id, enum stonith_namespace standard, const char *agent, const stonith_key_value_t *params, const char *rsc_provides) { xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID); xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES); #if HAVE_STONITH_STONITH_H if (standard == st_namespace_any) { standard = stonith_get_namespace(agent, NULL); } if (standard == st_namespace_lha) { hash2field((gpointer) "plugin", (gpointer) agent, args); agent = "fence_legacy"; } #endif crm_xml_add(data, PCMK_XA_ID, id); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(data, PCMK_XA_AGENT, agent); if ((standard != st_namespace_any) && (standard != st_namespace_invalid)) { crm_xml_add(data, PCMK__XA_NAMESPACE, stonith_namespace2text(standard)); } if (rsc_provides) { crm_xml_add(data, PCMK__XA_RSC_PROVIDES, rsc_provides); } for (; params; params = params->next) { hash2field((gpointer) params->key, (gpointer) params->value, args); } return data; } static int stonith_api_register_device(stonith_t *st, int call_options, const char *id, const char *namespace_s, const char *agent, const stonith_key_value_t *params) { int rc = 0; xmlNode *data = NULL; data = create_device_registration_xml(id, stonith_text2namespace(namespace_s), agent, params, NULL); rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_remove_device(stonith_t * st, int call_options, const char *name) { int rc = 0; xmlNode *data = NULL; data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(data, PCMK_XA_ID, name); rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_remove_level_full(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level) { int rc = 0; xmlNode *data = NULL; CRM_CHECK(node || pattern || (attr && value), return -EINVAL); data = pcmk__xe_create(NULL, PCMK_XE_FENCING_LEVEL); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); if (node) { crm_xml_add(data, PCMK_XA_TARGET, node); } else if (pattern) { crm_xml_add(data, PCMK_XA_TARGET_PATTERN, pattern); } else { crm_xml_add(data, PCMK_XA_TARGET_ATTRIBUTE, attr); crm_xml_add(data, PCMK_XA_TARGET_VALUE, value); } crm_xml_add_int(data, PCMK_XA_INDEX, level); rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_remove_level(stonith_t * st, int options, const char *node, int level) { return stonith_api_remove_level_full(st, options, node, NULL, NULL, NULL, level); } /*! 
* \internal * \brief Create XML for fence topology level registration request * * \param[in] node If not NULL, target level by this node name * \param[in] pattern If not NULL, target by node name using this regex * \param[in] attr If not NULL, target by this node attribute * \param[in] value If not NULL, target by this node attribute value * \param[in] level Index number of level to register * \param[in] device_list List of devices in level * * \return Newly allocated XML tree on success, NULL otherwise * * \note The caller should set only one of node, pattern or attr/value. */ xmlNode * create_level_registration_xml(const char *node, const char *pattern, const char *attr, const char *value, int level, const stonith_key_value_t *device_list) { GString *list = NULL; xmlNode *data; CRM_CHECK(node || pattern || (attr && value), return NULL); data = pcmk__xe_create(NULL, PCMK_XE_FENCING_LEVEL); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add_int(data, PCMK_XA_ID, level); crm_xml_add_int(data, PCMK_XA_INDEX, level); if (node) { crm_xml_add(data, PCMK_XA_TARGET, node); } else if (pattern) { crm_xml_add(data, PCMK_XA_TARGET_PATTERN, pattern); } else { crm_xml_add(data, PCMK_XA_TARGET_ATTRIBUTE, attr); crm_xml_add(data, PCMK_XA_TARGET_VALUE, value); } for (; device_list; device_list = device_list->next) { pcmk__add_separated_word(&list, 1024, device_list->value, ","); } if (list != NULL) { crm_xml_add(data, PCMK_XA_DEVICES, (const char *) list->str); g_string_free(list, TRUE); } return data; } static int stonith_api_register_level_full(stonith_t *st, int options, const char *node, const char *pattern, const char *attr, const char *value, int level, const stonith_key_value_t *device_list) { int rc = 0; xmlNode *data = create_level_registration_xml(node, pattern, attr, value, level, device_list); CRM_CHECK(data != NULL, return -EINVAL); rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0); pcmk__xml_free(data); return rc; } static int stonith_api_register_level(stonith_t * st, int options, const char *node, int level, const stonith_key_value_t * device_list) { return stonith_api_register_level_full(st, options, node, NULL, NULL, NULL, level, device_list); } static int stonith_api_device_list(stonith_t *stonith, int call_options, const char *namespace_s, stonith_key_value_t **devices, int timeout) { int count = 0; enum stonith_namespace ns = stonith_text2namespace(namespace_s); if (devices == NULL) { crm_err("Parameter error: stonith_api_device_list"); return -EFAULT; } #if HAVE_STONITH_STONITH_H // Include Linux-HA agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_lha)) { count += stonith__list_lha_agents(devices); } #endif // Include Red Hat agents if requested if ((ns == st_namespace_any) || (ns == st_namespace_rhcs)) { count += stonith__list_rhcs_agents(devices); } return count; } // See stonith_api_operations_t:metadata() documentation static int stonith_api_device_metadata(stonith_t *stonith, int call_options, const char *agent, const char *namespace_s, char **output, int timeout_sec) { /* By executing meta-data directly, we can get it from stonith_admin when * the cluster is not running, which is important for higher-level tools. 
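 *
 * The agent's namespace determines which helper does the work below:
 * RHCS-style agents go through stonith__rhcs_metadata(), Linux-HA agents
 * (when support is compiled in) go through stonith__lha_metadata(), and
 * anything else fails with -ENODEV.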
*/ enum stonith_namespace ns = stonith_get_namespace(agent, namespace_s); if (timeout_sec <= 0) { timeout_sec = PCMK_DEFAULT_ACTION_TIMEOUT_MS; } crm_trace("Looking up metadata for %s agent %s", stonith_namespace2text(ns), agent); switch (ns) { case st_namespace_rhcs: return stonith__rhcs_metadata(agent, timeout_sec, output); #if HAVE_STONITH_STONITH_H case st_namespace_lha: return stonith__lha_metadata(agent, timeout_sec, output); #endif default: crm_err("Can't get fence agent '%s' meta-data: No such agent", agent); break; } return -ENODEV; } static int stonith_api_query(stonith_t * stonith, int call_options, const char *target, stonith_key_value_t ** devices, int timeout) { int rc = 0, lpc = 0, max = 0; xmlNode *data = NULL; xmlNode *output = NULL; xmlXPathObject *xpathObj = NULL; CRM_CHECK(devices != NULL, return -EINVAL); data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_ST_TARGET, target); crm_xml_add(data, PCMK__XA_ST_DEVICE_ACTION, PCMK_ACTION_OFF); rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout); if (rc < 0) { return rc; } xpathObj = pcmk__xpath_search(output->doc, "//*[@" PCMK_XA_AGENT "]"); if (xpathObj) { max = pcmk__xpath_num_results(xpathObj); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = pcmk__xpath_result(xpathObj, lpc); CRM_LOG_ASSERT(match != NULL); if(match != NULL) { xmlChar *match_path = xmlGetNodePath(match); crm_info("//*[@" PCMK_XA_AGENT "][%d] = %s", lpc, match_path); free(match_path); *devices = stonith_key_value_add(*devices, NULL, pcmk__xe_get(match, PCMK_XA_ID)); } } xmlXPathFreeObject(xpathObj); } pcmk__xml_free(output); pcmk__xml_free(data); return max; } /*! * \internal * \brief Make a STONITH_OP_EXEC request * * \param[in,out] stonith Fencer connection * \param[in] call_options Bitmask of \c stonith_call_options * \param[in] id Fence device ID that request is for * \param[in] action Agent action to request (list, status, monitor) * \param[in] target Name of target node for requested action * \param[in] timeout_sec Error if not completed within this many seconds * \param[out] output Where to set agent output */ static int stonith_api_call(stonith_t *stonith, int call_options, const char *id, const char *action, const char *target, int timeout_sec, xmlNode **output) { int rc = 0; xmlNode *data = NULL; data = pcmk__xe_create(NULL, PCMK__XE_ST_DEVICE_ID); crm_xml_add(data, PCMK__XA_ST_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_ST_DEVICE_ID, id); crm_xml_add(data, PCMK__XA_ST_DEVICE_ACTION, action); crm_xml_add(data, PCMK__XA_ST_TARGET, target); rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout_sec); pcmk__xml_free(data); return rc; } static int stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info, int timeout) { int rc; xmlNode *output = NULL; rc = stonith_api_call(stonith, call_options, id, PCMK_ACTION_LIST, NULL, timeout, &output); if (output && list_info) { const char *list_str; list_str = pcmk__xe_get(output, PCMK__XA_ST_OUTPUT); if (list_str) { *list_info = strdup(list_str); } } if (output) { pcmk__xml_free(output); } return rc; } static int stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout) { return stonith_api_call(stonith, call_options, id, PCMK_ACTION_MONITOR, NULL, timeout, NULL); } static int stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port, int timeout) { return 
stonith_api_call(stonith, call_options, id, PCMK_ACTION_STATUS, port, timeout, NULL); } static int stonith_api_fence_with_delay(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance, int delay) { int rc = 0; xmlNode *data = NULL; data = pcmk__xe_create(NULL, __func__); crm_xml_add(data, PCMK__XA_ST_TARGET, node); crm_xml_add(data, PCMK__XA_ST_DEVICE_ACTION, action); crm_xml_add_int(data, PCMK__XA_ST_TIMEOUT, timeout); crm_xml_add_int(data, PCMK__XA_ST_TOLERANCE, tolerance); crm_xml_add_int(data, PCMK__XA_ST_DELAY, delay); rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout); pcmk__xml_free(data); return rc; } static int stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout, int tolerance) { return stonith_api_fence_with_delay(stonith, call_options, node, action, timeout, tolerance, 0); } static int stonith_api_confirm(stonith_t * stonith, int call_options, const char *target) { stonith__set_call_options(call_options, target, st_opt_manual_ack); return stonith_api_fence(stonith, call_options, target, PCMK_ACTION_OFF, 0, 0); } static int stonith_api_history(stonith_t * stonith, int call_options, const char *node, stonith_history_t ** history, int timeout) { int rc = 0; xmlNode *data = NULL; xmlNode *output = NULL; stonith_history_t *last = NULL; *history = NULL; if (node) { data = pcmk__xe_create(NULL, __func__); crm_xml_add(data, PCMK__XA_ST_TARGET, node); } stonith__set_call_options(call_options, node, st_opt_sync_call); rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output, call_options, timeout); pcmk__xml_free(data); if (rc == 0) { xmlNode *op = NULL; xmlNode *reply = pcmk__xpath_find_one(output->doc, "//" PCMK__XE_ST_HISTORY, LOG_NEVER); for (op = pcmk__xe_first_child(reply, NULL, NULL, NULL); op != NULL; op = pcmk__xe_next(op, NULL)) { stonith_history_t *kvp = pcmk__assert_alloc(1, sizeof(stonith_history_t)); long long completed_nsec = 0LL; - kvp->target = crm_element_value_copy(op, PCMK__XA_ST_TARGET); - kvp->action = crm_element_value_copy(op, PCMK__XA_ST_DEVICE_ACTION); - kvp->origin = crm_element_value_copy(op, PCMK__XA_ST_ORIGIN); - kvp->delegate = crm_element_value_copy(op, PCMK__XA_ST_DELEGATE); - kvp->client = crm_element_value_copy(op, PCMK__XA_ST_CLIENTNAME); + kvp->target = pcmk__xe_get_copy(op, PCMK__XA_ST_TARGET); + kvp->action = pcmk__xe_get_copy(op, PCMK__XA_ST_DEVICE_ACTION); + kvp->origin = pcmk__xe_get_copy(op, PCMK__XA_ST_ORIGIN); + kvp->delegate = pcmk__xe_get_copy(op, PCMK__XA_ST_DELEGATE); + kvp->client = pcmk__xe_get_copy(op, PCMK__XA_ST_CLIENTNAME); pcmk__xe_get_time(op, PCMK__XA_ST_DATE, &kvp->completed); pcmk__xe_get_ll(op, PCMK__XA_ST_DATE_NSEC, &completed_nsec); if ((completed_nsec >= LONG_MIN) && (completed_nsec <= LONG_MAX)) { kvp->completed_nsec = (long) completed_nsec; } pcmk__xe_get_int(op, PCMK__XA_ST_STATE, &kvp->state); - kvp->exit_reason = crm_element_value_copy(op, PCMK_XA_EXIT_REASON); + kvp->exit_reason = pcmk__xe_get_copy(op, PCMK_XA_EXIT_REASON); if (last) { last->next = kvp; } else { *history = kvp; } last = kvp; } } pcmk__xml_free(output); return rc; } void stonith_history_free(stonith_history_t *history) { stonith_history_t *hp, *hp_old; for (hp = history; hp; hp_old = hp, hp = hp->next, free(hp_old)) { free(hp->target); free(hp->action); free(hp->origin); free(hp->delegate); free(hp->client); free(hp->exit_reason); } } static gint stonithlib_GCompareFunc(gconstpointer a, 
gconstpointer b) { int rc = 0; const stonith_notify_client_t *a_client = a; const stonith_notify_client_t *b_client = b; if (a_client->delete || b_client->delete) { /* make entries marked for deletion not findable */ return -1; } CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0); rc = strcmp(a_client->event, b_client->event); if (rc == 0) { if (a_client->notify == NULL || b_client->notify == NULL) { return 0; } else if (a_client->notify == b_client->notify) { return 0; } else if (((long)a_client->notify) < ((long)b_client->notify)) { crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return -1; } crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return 1; } return rc; } xmlNode * stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options) { xmlNode *op_msg = NULL; CRM_CHECK(token != NULL, return NULL); op_msg = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); crm_xml_add(op_msg, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(op_msg, PCMK__XA_ST_OP, op); crm_xml_add_int(op_msg, PCMK__XA_ST_CALLID, call_id); crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, PCMK__XA_ST_CALLOPT, call_options); if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(op_msg, PCMK__XE_ST_CALLDATA); pcmk__xml_copy(wrapper, data); } return op_msg; } static void stonith_destroy_op_callback(gpointer data) { stonith_callback_client_t *blob = data; if (blob->timer && blob->timer->ref > 0) { g_source_remove(blob->timer->ref); } free(blob->timer); free(blob); } static int stonith_api_signoff(stonith_t * stonith) { stonith_private_t *native = stonith->st_private; crm_debug("Disconnecting from the fencer"); if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } free(native->token); native->token = NULL; stonith->state = stonith_disconnected; return pcmk_ok; } static int stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks) { stonith_private_t *private = stonith->st_private; if (all_callbacks) { private->op_callback = NULL; g_hash_table_destroy(private->stonith_op_callback_table); private->stonith_op_callback_table = pcmk__intkey_table(stonith_destroy_op_callback); } else if (call_id == 0) { private->op_callback = NULL; } else { pcmk__intkey_table_remove(private->stonith_op_callback_table, call_id); } return pcmk_ok; } /*! * \internal * \brief Invoke a (single) specified fence action callback * * \param[in,out] st Fencer API connection * \param[in] call_id If positive, call ID of completed fence action, * otherwise legacy return code for early failure * \param[in,out] result Full result for action * \param[in,out] userdata User data to pass to callback * \param[in] callback Fence action callback to invoke */ static void invoke_fence_action_callback(stonith_t *st, int call_id, pcmk__action_result_t *result, void *userdata, void (*callback) (stonith_t *st, stonith_callback_data_t *data)) { stonith_callback_data_t data = { 0, }; data.call_id = call_id; data.rc = pcmk_rc2legacy(stonith__result2rc(result)); data.userdata = userdata; data.opaque = (void *) result; callback(st, &data); } /*! 
* \internal * \brief Invoke any callbacks registered for a specified fence action result * * Given a fence action result from the fencer, invoke any callback registered * for that action, as well as any global callback registered. * * \param[in,out] stonith Fencer API connection * \param[in] msg If non-NULL, fencer reply * \param[in] call_id If \p msg is NULL, call ID of action that timed out */ static void invoke_registered_callbacks(stonith_t *stonith, const xmlNode *msg, int call_id) { stonith_private_t *private = NULL; stonith_callback_client_t *cb_info = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(stonith != NULL, return); CRM_CHECK(stonith->st_private != NULL, return); private = stonith->st_private; if (msg == NULL) { // Fencer didn't reply in time pcmk__set_result(&result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, "Fencer accepted request but did not reply in time"); CRM_LOG_ASSERT(call_id > 0); } else { // We have the fencer reply if ((pcmk__xe_get_int(msg, PCMK__XA_ST_CALLID, &call_id) != pcmk_rc_ok) || (call_id <= 0)) { crm_log_xml_warn(msg, "Bad fencer reply"); } stonith__xe_get_result(msg, &result); } if (call_id > 0) { cb_info = pcmk__intkey_table_lookup(private->stonith_op_callback_table, call_id); } if ((cb_info != NULL) && (cb_info->callback != NULL) && (pcmk__result_ok(&result) || !(cb_info->only_success))) { crm_trace("Invoking callback %s for call %d", pcmk__s(cb_info->id, "without ID"), call_id); invoke_fence_action_callback(stonith, call_id, &result, cb_info->user_data, cb_info->callback); } else if ((private->op_callback == NULL) && !pcmk__result_ok(&result)) { crm_warn("Fencing action without registered callback failed: %d (%s%s%s)", result.exit_status, pcmk_exec_status_str(result.execution_status), ((result.exit_reason == NULL)? "" : ": "), ((result.exit_reason == NULL)? 
"" : result.exit_reason)); crm_log_xml_debug(msg, "Failed fence update"); } if (private->op_callback != NULL) { crm_trace("Invoking global callback for call %d", call_id); invoke_fence_action_callback(stonith, call_id, &result, NULL, private->op_callback); } if (cb_info != NULL) { stonith_api_del_callback(stonith, call_id, FALSE); } pcmk__reset_result(&result); } static gboolean stonith_async_timeout_handler(gpointer data) { struct timer_rec_s *timer = data; crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout); invoke_registered_callbacks(timer->stonith, NULL, timer->call_id); /* Always return TRUE, never remove the handler * We do that in stonith_del_callback() */ return TRUE; } static void set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id, int timeout) { struct timer_rec_s *async_timer = callback->timer; if (timeout <= 0) { return; } if (!async_timer) { async_timer = pcmk__assert_alloc(1, sizeof(struct timer_rec_s)); callback->timer = async_timer; } async_timer->stonith = stonith; async_timer->call_id = call_id; /* Allow a fair bit of grace to allow the server to tell us of a timeout * This is only a fallback */ async_timer->timeout = (timeout + 60) * 1000; if (async_timer->ref) { g_source_remove(async_timer->ref); } async_timer->ref = pcmk__create_timer(async_timer->timeout, stonith_async_timeout_handler, async_timer); } static void update_callback_timeout(int call_id, int timeout, stonith_t * st) { stonith_callback_client_t *callback = NULL; stonith_private_t *private = st->st_private; callback = pcmk__intkey_table_lookup(private->stonith_op_callback_table, call_id); if (!callback || !callback->allow_timeout_updates) { return; } set_callback_timeout(callback, st, call_id, timeout); } static int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; struct notify_blob_s blob; stonith_t *st = userdata; stonith_private_t *private = NULL; pcmk__assert(st != NULL); private = st->st_private; blob.stonith = st; blob.xml = pcmk__xml_parse(buffer); if (blob.xml == NULL) { crm_warn("Received malformed message from fencer: %s", buffer); return 0; } /* do callbacks */ type = pcmk__xe_get(blob.xml, PCMK__XA_T); crm_trace("Activating %s callbacks...", type); if (pcmk__str_eq(type, PCMK__VALUE_STONITH_NG, pcmk__str_none)) { invoke_registered_callbacks(st, blob.xml, 0); } else if (pcmk__str_eq(type, PCMK__VALUE_ST_NOTIFY, pcmk__str_none)) { foreach_notify_entry(private, stonith_send_notification, &blob); } else if (pcmk__str_eq(type, PCMK__VALUE_ST_ASYNC_TIMEOUT_VALUE, pcmk__str_none)) { int call_id = 0; int timeout = 0; pcmk__xe_get_int(blob.xml, PCMK__XA_ST_TIMEOUT, &timeout); pcmk__xe_get_int(blob.xml, PCMK__XA_ST_CALLID, &call_id); update_callback_timeout(call_id, timeout, st); } else { crm_err("Unknown message type: %s", type); crm_log_xml_warn(blob.xml, "BadReply"); } pcmk__xml_free(blob.xml); return 1; } static int stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd) { int rc = pcmk_ok; stonith_private_t *native = NULL; const char *display_name = name? name : "client"; struct ipc_client_callbacks st_callbacks = { .dispatch = stonith_dispatch_internal, .destroy = stonith_connection_destroy }; CRM_CHECK(stonith != NULL, return -EINVAL); native = stonith->st_private; pcmk__assert(native != NULL); crm_debug("Attempting fencer connection by %s with%s mainloop", display_name, (stonith_fd? 
"out" : "")); stonith->state = stonith_connected_command; if (stonith_fd) { /* No mainloop */ native->ipc = crm_ipc_new("stonith-ng", 0); if (native->ipc != NULL) { rc = pcmk__connect_generic_ipc(native->ipc); if (rc == pcmk_rc_ok) { rc = pcmk__ipc_fd(native->ipc, stonith_fd); if (rc != pcmk_rc_ok) { crm_debug("Couldn't get file descriptor for IPC: %s", pcmk_rc_str(rc)); } } if (rc != pcmk_rc_ok) { crm_ipc_close(native->ipc); crm_ipc_destroy(native->ipc); native->ipc = NULL; } } } else { /* With mainloop */ native->source = mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { rc = -ENOTCONN; } else { xmlNode *reply = NULL; xmlNode *hello = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); crm_xml_add(hello, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(hello, PCMK__XA_ST_OP, CRM_OP_REGISTER); crm_xml_add(hello, PCMK__XA_ST_CLIENTNAME, name); rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply); if (rc < 0) { crm_debug("Couldn't register with the fencer: %s " QB_XS " rc=%d", pcmk_strerror(rc), rc); rc = -ECOMM; } else if (reply == NULL) { crm_debug("Couldn't register with the fencer: no reply"); rc = -EPROTO; } else { const char *msg_type = pcmk__xe_get(reply, PCMK__XA_ST_OP); - native->token = crm_element_value_copy(reply, PCMK__XA_ST_CLIENTID); + native->token = pcmk__xe_get_copy(reply, PCMK__XA_ST_CLIENTID); if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_none)) { crm_debug("Couldn't register with the fencer: invalid reply type '%s'", (msg_type? msg_type : "(missing)")); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else if (native->token == NULL) { crm_debug("Couldn't register with the fencer: no token in reply"); crm_log_xml_debug(reply, "Invalid fencer reply"); rc = -EPROTO; } else { crm_debug("Connection to fencer by %s succeeded (registration token: %s)", display_name, native->token); rc = pcmk_ok; } } pcmk__xml_free(reply); pcmk__xml_free(hello); } if (rc != pcmk_ok) { crm_debug("Connection attempt to fencer by %s failed: %s " QB_XS " rc=%d", display_name, pcmk_strerror(rc), rc); stonith->cmds->disconnect(stonith); } return rc; } static int stonith_set_notification(stonith_t * stonith, const char *callback, int enabled) { int rc = pcmk_ok; xmlNode *notify_msg = pcmk__xe_create(NULL, __func__); stonith_private_t *native = stonith->st_private; if (stonith->state != stonith_disconnected) { crm_xml_add(notify_msg, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); if (enabled) { crm_xml_add(notify_msg, PCMK__XA_ST_NOTIFY_ACTIVATE, callback); } else { crm_xml_add(notify_msg, PCMK__XA_ST_NOTIFY_DEACTIVATE, callback); } rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL); if (rc < 0) { crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc); rc = -ECOMM; } else { rc = pcmk_ok; } } pcmk__xml_free(notify_msg); return rc; } static int stonith_api_add_notification(stonith_t * stonith, const char *event, void (*callback) (stonith_t * stonith, stonith_event_t * e)) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; private = stonith->st_private; crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list)); new_client = pcmk__assert_alloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = callback; list_item = g_list_find_custom(private->notify_list, new_client, 
stonithlib_GCompareFunc); if (list_item != NULL) { crm_warn("Callback already present"); free(new_client); return -ENOTUNIQ; } else { private->notify_list = g_list_append(private->notify_list, new_client); stonith_set_notification(stonith, event, 1); crm_trace("Callback added (%d)", g_list_length(private->notify_list)); } return pcmk_ok; } static void del_notify_entry(gpointer data, gpointer user_data) { stonith_notify_client_t *entry = data; stonith_t * stonith = user_data; if (!entry->delete) { crm_debug("Removing callback for %s events", entry->event); stonith_api_del_notification(stonith, entry->event); } } static int stonith_api_del_notification(stonith_t * stonith, const char *event) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = stonith->st_private; if (event == NULL) { foreach_notify_entry(private, del_notify_entry, stonith); crm_trace("Removed callback"); return pcmk_ok; } crm_debug("Removing callback for %s events", event); new_client = pcmk__assert_alloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = NULL; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); stonith_set_notification(stonith, event, 0); if (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; if (private->notify_refcnt) { list_client->delete = TRUE; private->notify_deletes = TRUE; } else { private->notify_list = g_list_remove(private->notify_list, list_client); free(list_client); } crm_trace("Removed callback"); } else { crm_trace("Callback not present"); } free(new_client); return pcmk_ok; } static int stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options, void *user_data, const char *callback_name, void (*callback) (stonith_t * st, stonith_callback_data_t * data)) { stonith_callback_client_t *blob = NULL; stonith_private_t *private = NULL; CRM_CHECK(stonith != NULL, return -EINVAL); CRM_CHECK(stonith->st_private != NULL, return -EINVAL); private = stonith->st_private; if (call_id == 0) { // Add global callback private->op_callback = callback; } else if (call_id < 0) { // Call failed immediately, so call callback now if (!(options & st_opt_report_only_success)) { pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id)); pcmk__set_result(&result, CRM_EX_ERROR, stonith__legacy2status(call_id), NULL); invoke_fence_action_callback(stonith, call_id, &result, user_data, callback); } else { crm_warn("Fencer call failed: %s", pcmk_strerror(call_id)); } return FALSE; } blob = pcmk__assert_alloc(1, sizeof(stonith_callback_client_t)); blob->id = callback_name; blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE; blob->user_data = user_data; blob->callback = callback; blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? 
TRUE : FALSE; if (timeout > 0) { set_callback_timeout(blob, stonith, call_id, timeout); } pcmk__intkey_table_insert(private->stonith_op_callback_table, call_id, blob); crm_trace("Added callback to %s for call %d", callback_name, call_id); return TRUE; } static void stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data) { int call = GPOINTER_TO_INT(key); stonith_callback_client_t *blob = value; crm_debug("Call %d (%s): pending", call, pcmk__s(blob->id, "no ID")); } void stonith_dump_pending_callbacks(stonith_t * stonith) { stonith_private_t *private = stonith->st_private; if (private->stonith_op_callback_table == NULL) { return; } return g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL); } /*! * \internal * \brief Get the data section of a fencer notification * * \param[in] msg Notification XML * \param[in] ntype Notification type */ static xmlNode * get_event_data_xml(xmlNode *msg, const char *ntype) { char *data_addr = crm_strdup_printf("//%s", ntype); xmlNode *data = pcmk__xpath_find_one(msg->doc, data_addr, LOG_DEBUG); free(data_addr); return data; } /* */ static stonith_event_t * xml_to_event(xmlNode *msg) { stonith_event_t *event = pcmk__assert_alloc(1, sizeof(stonith_event_t)); struct event_private *event_private = NULL; event->opaque = pcmk__assert_alloc(1, sizeof(struct event_private)); event_private = (struct event_private *) event->opaque; crm_log_xml_trace(msg, "stonith_notify"); // All notification types have the operation result and notification subtype stonith__xe_get_result(msg, &event_private->result); - event->operation = crm_element_value_copy(msg, PCMK__XA_ST_OP); + event->operation = pcmk__xe_get_copy(msg, PCMK__XA_ST_OP); // @COMPAT The API originally provided the result as a legacy return code event->result = pcmk_rc2legacy(stonith__result2rc(&event_private->result)); // Some notification subtypes have additional information if (pcmk__str_eq(event->operation, PCMK__VALUE_ST_NOTIFY_FENCE, pcmk__str_none)) { xmlNode *data = get_event_data_xml(msg, event->operation); if (data == NULL) { crm_err("No data for %s event", event->operation); crm_log_xml_notice(msg, "BadEvent"); } else { - event->origin = crm_element_value_copy(data, PCMK__XA_ST_ORIGIN); - event->action = crm_element_value_copy(data, - PCMK__XA_ST_DEVICE_ACTION); - event->target = crm_element_value_copy(data, PCMK__XA_ST_TARGET); - event->executioner = crm_element_value_copy(data, - PCMK__XA_ST_DELEGATE); - event->id = crm_element_value_copy(data, PCMK__XA_ST_REMOTE_OP); - event->client_origin = - crm_element_value_copy(data, PCMK__XA_ST_CLIENTNAME); - event->device = crm_element_value_copy(data, PCMK__XA_ST_DEVICE_ID); + event->origin = pcmk__xe_get_copy(data, PCMK__XA_ST_ORIGIN); + event->action = pcmk__xe_get_copy(data, PCMK__XA_ST_DEVICE_ACTION); + event->target = pcmk__xe_get_copy(data, PCMK__XA_ST_TARGET); + event->executioner = pcmk__xe_get_copy(data, PCMK__XA_ST_DELEGATE); + event->id = pcmk__xe_get_copy(data, PCMK__XA_ST_REMOTE_OP); + event->client_origin = pcmk__xe_get_copy(data, + PCMK__XA_ST_CLIENTNAME); + event->device = pcmk__xe_get_copy(data, PCMK__XA_ST_DEVICE_ID); } } else if (pcmk__str_any_of(event->operation, STONITH_OP_DEVICE_ADD, STONITH_OP_DEVICE_DEL, STONITH_OP_LEVEL_ADD, STONITH_OP_LEVEL_DEL, NULL)) { xmlNode *data = get_event_data_xml(msg, event->operation); if (data == NULL) { crm_err("No data for %s event", event->operation); crm_log_xml_notice(msg, "BadEvent"); } else { - event->device = crm_element_value_copy(data, 
PCMK__XA_ST_DEVICE_ID); + event->device = pcmk__xe_get_copy(data, PCMK__XA_ST_DEVICE_ID); } } return event; } static void event_free(stonith_event_t * event) { struct event_private *event_private = event->opaque; free(event->id); free(event->operation); free(event->origin); free(event->action); free(event->target); free(event->executioner); free(event->device); free(event->client_origin); pcmk__reset_result(&event_private->result); free(event->opaque); free(event); } static void stonith_send_notification(gpointer data, gpointer user_data) { struct notify_blob_s *blob = user_data; stonith_notify_client_t *entry = data; stonith_event_t *st_event = NULL; const char *event = NULL; if (blob->xml == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = pcmk__xe_get(blob->xml, PCMK__XA_SUBT); if (entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if (entry->delete) { crm_trace("Skipping callback - marked for deletion"); return; } else if (entry->notify == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if (!pcmk__str_eq(entry->event, event, pcmk__str_none)) { crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event); return; } st_event = xml_to_event(blob->xml); crm_trace("Invoking callback for %p/%s event...", entry, event); entry->notify(blob->stonith, st_event); crm_trace("Callback invoked..."); event_free(st_event); } /*! * \internal * \brief Create and send an API request * * \param[in,out] stonith Stonith connection * \param[in] op API operation to request * \param[in] data Data to attach to request * \param[out] output_data If not NULL, will be set to reply if synchronous * \param[in] call_options Bitmask of stonith_call_options to use * \param[in] timeout Error if not completed within this many seconds * * \return pcmk_ok (for synchronous requests) or positive call ID * (for asynchronous requests) on success, -errno otherwise */ static int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout) { int rc = 0; int reply_id = -1; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; stonith_private_t *native = NULL; pcmk__assert((stonith != NULL) && (stonith->st_private != NULL) && (op != NULL)); native = stonith->st_private; if (output_data != NULL) { *output_data = NULL; } if ((stonith->state == stonith_disconnected) || (native->token == NULL)) { return -ENOTCONN; } /* Increment the call ID, which must be positive to avoid conflicting with * error codes. This shouldn't be a problem unless the client mucked with * it or the counter wrapped around. 
*/ stonith->call_id++; if (stonith->call_id < 1) { stonith->call_id = 1; } op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options); if (op_msg == NULL) { return -EINVAL; } crm_xml_add_int(op_msg, PCMK__XA_ST_TIMEOUT, timeout); crm_trace("Sending %s message to fencer with timeout %ds", op, timeout); if (data) { const char *delay_s = pcmk__xe_get(data, PCMK__XA_ST_DELAY); if (delay_s) { crm_xml_add(op_msg, PCMK__XA_ST_DELAY, delay_s); } } { enum crm_ipc_flags ipc_flags = crm_ipc_flags_none; if (call_options & st_opt_sync_call) { pcmk__set_ipc_flags(ipc_flags, "stonith command", crm_ipc_client_response); } rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000 * (timeout + 60), &op_reply); } pcmk__xml_free(op_msg); if (rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & st_opt_sync_call)) { crm_trace("Async call %d, returning", stonith->call_id); pcmk__xml_free(op_reply); return stonith->call_id; } pcmk__xe_get_int(op_reply, PCMK__XA_ST_CALLID, &reply_id); if (reply_id == stonith->call_id) { pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; crm_trace("Synchronous reply %d received", reply_id); stonith__xe_get_result(op_reply, &result); rc = pcmk_rc2legacy(stonith__result2rc(&result)); pcmk__reset_result(&result); if ((call_options & st_opt_discard_reply) || output_data == NULL) { crm_trace("Discarding reply"); } else { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } } else if (reply_id <= 0) { crm_err("Received bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); pcmk__xml_free(op_reply); op_reply = NULL; rc = -ENOMSG; } else { crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id); crm_log_xml_err(op_reply, "Old reply"); pcmk__xml_free(op_reply); op_reply = NULL; rc = -ENOMSG; } done: if (!crm_ipc_connected(native->ipc)) { crm_err("Fencer disconnected"); free(native->token); native->token = NULL; stonith->state = stonith_disconnected; } pcmk__xml_free(op_reply); return rc; } /* Not used with mainloop */ bool stonith_dispatch(stonith_t * st) { gboolean stay_connected = TRUE; stonith_private_t *private = NULL; pcmk__assert(st != NULL); private = st->st_private; while (crm_ipc_ready(private->ipc)) { if (crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); stonith_dispatch_internal(msg, strlen(msg), st); } if (!crm_ipc_connected(private->ipc)) { crm_err("Connection closed"); stay_connected = FALSE; } } return stay_connected; } static int stonith_api_free(stonith_t * stonith) { int rc = pcmk_ok; crm_trace("Destroying %p", stonith); if (stonith->state != stonith_disconnected) { crm_trace("Unregistering notifications and disconnecting %p first", stonith); stonith->cmds->remove_notification(stonith, NULL); rc = stonith->cmds->disconnect(stonith); } if (stonith->state == stonith_disconnected) { stonith_private_t *private = stonith->st_private; crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table)); g_hash_table_destroy(private->stonith_op_callback_table); crm_trace("Destroying %d notification clients", g_list_length(private->notify_list)); g_list_free_full(private->notify_list, free); free(stonith->st_private); free(stonith->cmds); free(stonith); } else { crm_err("Not free'ing active connection: %s (%d)", pcmk_strerror(rc), rc); } return rc; } void stonith_api_delete(stonith_t * stonith) { crm_trace("Destroying %p", 
stonith); if(stonith) { stonith->cmds->free(stonith); } } static gboolean is_stonith_param(gpointer key, gpointer value, gpointer user_data) { return pcmk_stonith_param(key); } int stonith__validate(stonith_t *st, int call_options, const char *rsc_id, const char *namespace_s, const char *agent, GHashTable *params, int timeout_sec, char **output, char **error_output) { int rc = pcmk_rc_ok; /* Use a dummy node name in case the agent requires a target. We assume the * actual target doesn't matter for validation purposes (if in practice, * that is incorrect, we will need to allow the caller to pass the target). */ const char *target = "node1"; char *host_arg = NULL; if (params != NULL) { host_arg = pcmk__str_copy(g_hash_table_lookup(params, PCMK_STONITH_HOST_ARGUMENT)); /* Remove special stonith params from the table before doing anything else */ g_hash_table_foreach_remove(params, is_stonith_param, NULL); } #if PCMK__ENABLE_CIBSECRETS rc = pcmk__substitute_secrets(rsc_id, params); if (rc != pcmk_rc_ok) { crm_warn("Could not replace secret parameters for validation of %s: %s", agent, pcmk_rc_str(rc)); // rc is standard return value, don't return it in this function } #endif if (output) { *output = NULL; } if (error_output) { *error_output = NULL; } if (timeout_sec <= 0) { timeout_sec = PCMK_DEFAULT_ACTION_TIMEOUT_MS; } switch (stonith_get_namespace(agent, namespace_s)) { case st_namespace_rhcs: rc = stonith__rhcs_validate(st, call_options, target, agent, params, host_arg, timeout_sec, output, error_output); rc = pcmk_legacy2rc(rc); break; #if HAVE_STONITH_STONITH_H case st_namespace_lha: rc = stonith__lha_validate(st, call_options, target, agent, params, timeout_sec, output, error_output); rc = pcmk_legacy2rc(rc); break; #endif case st_namespace_invalid: errno = ENOENT; rc = errno; if (error_output) { *error_output = crm_strdup_printf("Agent %s not found", agent); } else { crm_err("Agent %s not found", agent); } break; default: errno = EOPNOTSUPP; rc = errno; if (error_output) { *error_output = crm_strdup_printf("Agent %s does not support validation", agent); } else { crm_err("Agent %s does not support validation", agent); } break; } free(host_arg); return rc; } static int stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id, const char *namespace_s, const char *agent, const stonith_key_value_t *params, int timeout_sec, char **output, char **error_output) { /* Validation should be done directly via the agent, so we can get it from * stonith_admin when the cluster is not running, which is important for * higher-level tools. 
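 *
 * As an illustrative sketch only (the device ID, agent name, and parameter
 * below are hypothetical, and "st" is assumed to be an initialized
 * stonith_t from stonith_api_new()), a higher-level tool might call:
 *
 *   stonith_key_value_t *params = NULL;
 *   char *out = NULL;
 *   char *err = NULL;
 *   int rc;
 *
 *   params = stonith_key_value_add(params, "ip", "192.168.122.1");
 *   rc = st->cmds->validate(st, st_opt_sync_call, "fence-dev1", NULL,
 *                           "fence_xvm", params, 30, &out, &err);
 *   stonith_key_value_freeall(params, 1, 1);
 *   // out and err (if set) are owned by the caller and must be freed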
*/ int rc = pcmk_ok; GHashTable *params_table = pcmk__strkey_table(free, free); // Convert parameter list to a hash table for (; params; params = params->next) { if (!pcmk_stonith_param(params->key)) { pcmk__insert_dup(params_table, params->key, params->value); } } rc = stonith__validate(st, call_options, rsc_id, namespace_s, agent, params_table, timeout_sec, output, error_output); g_hash_table_destroy(params_table); return rc; } stonith_t * stonith_api_new(void) { stonith_t *new_stonith = NULL; stonith_private_t *private = NULL; new_stonith = calloc(1, sizeof(stonith_t)); if (new_stonith == NULL) { return NULL; } private = calloc(1, sizeof(stonith_private_t)); if (private == NULL) { free(new_stonith); return NULL; } new_stonith->st_private = private; private->stonith_op_callback_table = pcmk__intkey_table(stonith_destroy_op_callback); private->notify_list = NULL; private->notify_refcnt = 0; private->notify_deletes = FALSE; new_stonith->call_id = 1; new_stonith->state = stonith_disconnected; new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t)); if (new_stonith->cmds == NULL) { free(new_stonith->st_private); free(new_stonith); return NULL; } /* *INDENT-OFF* */ new_stonith->cmds->free = stonith_api_free; new_stonith->cmds->connect = stonith_api_signon; new_stonith->cmds->disconnect = stonith_api_signoff; new_stonith->cmds->list = stonith_api_list; new_stonith->cmds->monitor = stonith_api_monitor; new_stonith->cmds->status = stonith_api_status; new_stonith->cmds->fence = stonith_api_fence; new_stonith->cmds->fence_with_delay = stonith_api_fence_with_delay; new_stonith->cmds->confirm = stonith_api_confirm; new_stonith->cmds->history = stonith_api_history; new_stonith->cmds->list_agents = stonith_api_device_list; new_stonith->cmds->metadata = stonith_api_device_metadata; new_stonith->cmds->query = stonith_api_query; new_stonith->cmds->remove_device = stonith_api_remove_device; new_stonith->cmds->register_device = stonith_api_register_device; new_stonith->cmds->remove_level = stonith_api_remove_level; new_stonith->cmds->remove_level_full = stonith_api_remove_level_full; new_stonith->cmds->register_level = stonith_api_register_level; new_stonith->cmds->register_level_full = stonith_api_register_level_full; new_stonith->cmds->remove_callback = stonith_api_del_callback; new_stonith->cmds->register_callback = stonith_api_add_callback; new_stonith->cmds->remove_notification = stonith_api_del_notification; new_stonith->cmds->register_notification = stonith_api_add_notification; new_stonith->cmds->validate = stonith_api_validate; /* *INDENT-ON* */ return new_stonith; } /*! 
* \brief Make a blocking connection attempt to the fencer * * \param[in,out] st Fencer API object * \param[in] name Client name to use with fencer * \param[in] max_attempts Return error if this many attempts fail * * \return pcmk_ok on success, result of last attempt otherwise */ int stonith_api_connect_retry(stonith_t *st, const char *name, int max_attempts) { int rc = -EINVAL; // if max_attempts is not positive for (int attempt = 1; attempt <= max_attempts; attempt++) { rc = st->cmds->connect(st, name, NULL); if (rc == pcmk_ok) { return pcmk_ok; } else if (attempt < max_attempts) { crm_notice("Fencer connection attempt %d of %d failed (retrying in 2s): %s " QB_XS " rc=%d", attempt, max_attempts, pcmk_strerror(rc), rc); sleep(2); } } crm_notice("Could not connect to fencer: %s " QB_XS " rc=%d", pcmk_strerror(rc), rc); return rc; } stonith_key_value_t * stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value) { stonith_key_value_t *p, *end; p = pcmk__assert_alloc(1, sizeof(stonith_key_value_t)); p->key = pcmk__str_copy(key); p->value = pcmk__str_copy(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values) { stonith_key_value_t *p; while (head) { p = head->next; if (keys) { free(head->key); } if (values) { free(head->value); } free(head); head = p; } } #define api_log_open() openlog("stonith-api", LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON) #define api_log(level, fmt, args...) syslog(level, "%s: "fmt, __func__, args) int stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off) { int rc = pcmk_ok; stonith_t *st = stonith_api_new(); const char *action = off? PCMK_ACTION_OFF : PCMK_ACTION_REBOOT; api_log_open(); if (st == NULL) { api_log(LOG_ERR, "API initialization failed, could not kick (%s) node %u/%s", action, nodeid, uname); return -EPROTO; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_ERR, "Connection failed, could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { char *name = (uname == NULL)? pcmk__itoa(nodeid) : strdup(uname); int opts = 0; stonith__set_call_options(opts, name, st_opt_sync_call|st_opt_allow_self_fencing); if ((uname == NULL) && (nodeid > 0)) { stonith__set_call_options(opts, name, st_opt_cs_nodeid); } rc = st->cmds->fence(st, opts, name, action, timeout, 0); free(name); if (rc != pcmk_ok) { api_log(LOG_ERR, "Could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc); } else { api_log(LOG_NOTICE, "Node %u/%s kicked: %s", nodeid, uname, action); } } stonith_api_delete(st); return rc; } time_t stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress) { int rc = pcmk_ok; time_t when = 0; stonith_t *st = stonith_api_new(); stonith_history_t *history = NULL, *hp = NULL; if (st == NULL) { api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: " "API initialization failed", nodeid, uname); return when; } rc = st->cmds->connect(st, "stonith-api", NULL); if (rc != pcmk_ok) { api_log(LOG_NOTICE, "Connection failed: %s (%d)", pcmk_strerror(rc), rc); } else { int entries = 0; int progress = 0; int completed = 0; int opts = 0; char *name = (uname == NULL)? 
pcmk__itoa(nodeid) : strdup(uname); stonith__set_call_options(opts, name, st_opt_sync_call); if ((uname == NULL) && (nodeid > 0)) { stonith__set_call_options(opts, name, st_opt_cs_nodeid); } rc = st->cmds->history(st, opts, name, &history, 120); free(name); for (hp = history; hp; hp = hp->next) { entries++; if (in_progress) { progress++; if (hp->state != st_done && hp->state != st_failed) { when = time(NULL); } } else if (hp->state == st_done) { completed++; if (hp->completed > when) { when = hp->completed; } } } stonith_history_free(history); if(rc == pcmk_ok) { api_log(LOG_INFO, "Found %d entries for %u/%s: %d in progress, %d completed", entries, nodeid, uname, progress, completed); } else { api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: %s (%d)", nodeid, uname, pcmk_strerror(rc), rc); } } stonith_api_delete(st); if(when) { api_log(LOG_INFO, "Node %u/%s last kicked at: %ld", nodeid, uname, (long int)when); } return when; } bool stonith_agent_exists(const char *agent, int timeout) { stonith_t *st = NULL; stonith_key_value_t *devices = NULL; stonith_key_value_t *dIter = NULL; bool rc = FALSE; if (agent == NULL) { return rc; } st = stonith_api_new(); if (st == NULL) { crm_err("Could not list fence agents: API memory allocation failed"); return FALSE; } st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout == 0 ? 120 : timeout); for (dIter = devices; dIter != NULL; dIter = dIter->next) { if (pcmk__str_eq(dIter->value, agent, pcmk__str_none)) { rc = TRUE; break; } } stonith_key_value_freeall(devices, 1, 1); stonith_api_delete(st); return rc; } const char * stonith_action_str(const char *action) { if (action == NULL) { return "fencing"; } else if (strcmp(action, PCMK_ACTION_ON) == 0) { return "unfencing"; } else if (strcmp(action, PCMK_ACTION_OFF) == 0) { return "turning off"; } else { return action; } } /*! * \internal * \brief Parse a target name from one line of a target list string * * \param[in] line One line of a target list string * \param[in] len String length of line * \param[in,out] output List to add newly allocated target name to */ static void parse_list_line(const char *line, int len, GList **output) { size_t i = 0; size_t entry_start = 0; /* Skip complaints about additional parameters device doesn't understand * * @TODO Document or eliminate the implied restriction of target names */ if (strstr(line, "invalid") || strstr(line, "variable")) { crm_debug("Skipping list output line: %s", line); return; } // Process line content, character by character for (i = 0; i <= len; i++) { if (isspace(line[i]) || (line[i] == ',') || (line[i] == ';') || (line[i] == '\0')) { // We've found a separator (i.e. the end of an entry) int rc = 0; char *entry = NULL; if (i == entry_start) { // Skip leading and sequential separators entry_start = i + 1; continue; } entry = pcmk__assert_alloc(i - entry_start + 1, sizeof(char)); /* Read entry, stopping at first separator * * @TODO Document or eliminate these character restrictions */ rc = sscanf(line + entry_start, "%[a-zA-Z0-9_-.]", entry); if (rc != 1) { crm_warn("Could not parse list output entry: %s " QB_XS " entry_start=%d position=%d", line + entry_start, entry_start, i); free(entry); } else if (pcmk__strcase_any_of(entry, PCMK_ACTION_ON, PCMK_ACTION_OFF, NULL)) { /* Some agents print the target status in the list output, * though none are known now (the separate list-status command * is used for this, but it can also print "UNKNOWN"). To handle * this possibility, skip such entries. 
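 * For example (an illustrative list line, not taken from any particular
 * agent): "node1 ON" yields only the target "node1", because the "ON"
 * entry is dropped by this branch.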
* * @TODO Document or eliminate the implied restriction of target * names. */ free(entry); } else { // We have a valid entry *output = g_list_append(*output, entry); } entry_start = i + 1; } } } /*! * \internal * \brief Parse a list of targets from a string * * \param[in] list_output Target list as a string * * \return List of target names * \note The target list string format is flexible, to allow for user-specified * lists such pcmk_host_list and the output of an agent's list action * (whether direct or via the API, which escapes newlines). There may be * multiple lines, separated by either a newline or an escaped newline * (backslash n). Each line may have one or more target names, separated * by any combination of whitespace, commas, and semi-colons. Lines * containing "invalid" or "variable" will be ignored entirely. Target * names "on" or "off" (case-insensitive) will be ignored. Target names * may contain only alphanumeric characters, underbars (_), dashes (-), * and dots (.) (if any other character occurs in the name, it and all * subsequent characters in the name will be ignored). * \note The caller is responsible for freeing the result with * g_list_free_full(result, free). */ GList * stonith__parse_targets(const char *target_spec) { GList *targets = NULL; if (target_spec != NULL) { size_t out_len = strlen(target_spec); size_t line_start = 0; // Starting index of line being processed for (size_t i = 0; i <= out_len; ++i) { if ((target_spec[i] == '\n') || (target_spec[i] == '\0') || ((target_spec[i] == '\\') && (target_spec[i + 1] == 'n'))) { // We've reached the end of one line of output int len = i - line_start; if (len > 0) { char *line = strndup(target_spec + line_start, len); line[len] = '\0'; // Because it might be a newline parse_list_line(line, len, &targets); free(line); } if (target_spec[i] == '\\') { ++i; // backslash-n takes up two positions } line_start = i + 1; } } } return targets; } /*! * \internal * \brief Check whether a fencing failure was followed by an equivalent success * * \param[in] event Fencing failure * \param[in] top_history Complete fencing history (must be sorted by * stonith__sort_history() beforehand) * * \return The name of the node that executed the fencing if a later successful * event exists, or NULL if no such event exists */ const char * stonith__later_succeeded(const stonith_history_t *event, const stonith_history_t *top_history) { const char *other = NULL; for (const stonith_history_t *prev_hp = top_history; prev_hp != NULL; prev_hp = prev_hp->next) { if (prev_hp == event) { break; } if ((prev_hp->state == st_done) && pcmk__str_eq(event->target, prev_hp->target, pcmk__str_casei) && pcmk__str_eq(event->action, prev_hp->action, pcmk__str_none) && ((event->completed < prev_hp->completed) || ((event->completed == prev_hp->completed) && (event->completed_nsec < prev_hp->completed_nsec)))) { if ((event->delegate == NULL) || pcmk__str_eq(event->delegate, prev_hp->delegate, pcmk__str_casei)) { // Prefer equivalent fencing by same executioner return prev_hp->delegate; } else if (other == NULL) { // Otherwise remember first successful executioner other = (prev_hp->delegate == NULL)? "some node" : prev_hp->delegate; } } } return other; } /*! 
* \internal * \brief Sort fencing history, pending first then by most recently completed * * \param[in,out] history List of stonith actions * * \return New head of sorted \p history */ stonith_history_t * stonith__sort_history(stonith_history_t *history) { stonith_history_t *new = NULL, *pending = NULL, *hp, *np, *tmp; for (hp = history; hp; ) { tmp = hp->next; if ((hp->state == st_done) || (hp->state == st_failed)) { /* sort into new */ if ((!new) || (hp->completed > new->completed) || ((hp->completed == new->completed) && (hp->completed_nsec > new->completed_nsec))) { hp->next = new; new = hp; } else { np = new; do { if ((!np->next) || (hp->completed > np->next->completed) || ((hp->completed == np->next->completed) && (hp->completed_nsec > np->next->completed_nsec))) { hp->next = np->next; np->next = hp; break; } np = np->next; } while (1); } } else { /* put into pending */ hp->next = pending; pending = hp; } hp = tmp; } /* pending actions don't have a completed-stamp so make them go front */ if (pending) { stonith_history_t *last_pending = pending; while (last_pending->next) { last_pending = last_pending->next; } last_pending->next = new; new = pending; } return new; } /*! * \brief Return string equivalent of an operation state value * * \param[in] state Fencing operation state value * * \return Human-friendly string equivalent of state */ const char * stonith_op_state_str(enum op_state state) { switch (state) { case st_query: return "querying"; case st_exec: return "executing"; case st_done: return "completed"; case st_duplicate: return "duplicate"; case st_failed: return "failed"; } return "unknown"; } stonith_history_t * stonith__first_matching_event(stonith_history_t *history, bool (*matching_fn)(stonith_history_t *, void *), void *user_data) { for (stonith_history_t *hp = history; hp; hp = hp->next) { if (matching_fn(hp, user_data)) { return hp; } } return NULL; } bool stonith__event_state_pending(stonith_history_t *history, void *user_data) { return history->state != st_failed && history->state != st_done; } bool stonith__event_state_eq(stonith_history_t *history, void *user_data) { return history->state == GPOINTER_TO_INT(user_data); } bool stonith__event_state_neq(stonith_history_t *history, void *user_data) { return history->state != GPOINTER_TO_INT(user_data); } void stonith__device_parameter_flags(uint32_t *device_flags, const char *device_name, xmlNode *metadata) { xmlXPathObject *xpath = NULL; int max = 0; int lpc = 0; CRM_CHECK((device_flags != NULL) && (metadata != NULL), return); xpath = pcmk__xpath_search(metadata->doc, "//" PCMK_XE_PARAMETER); max = pcmk__xpath_num_results(xpath); if (max == 0) { xmlXPathFreeObject(xpath); return; } for (lpc = 0; lpc < max; lpc++) { const char *parameter = NULL; xmlNode *match = pcmk__xpath_result(xpath, lpc); CRM_LOG_ASSERT(match != NULL); if (match == NULL) { continue; } parameter = pcmk__xe_get(match, PCMK_XA_NAME); if (pcmk__str_eq(parameter, "plug", pcmk__str_casei)) { stonith__set_device_flags(*device_flags, device_name, st_device_supports_parameter_plug); } else if (pcmk__str_eq(parameter, "port", pcmk__str_casei)) { stonith__set_device_flags(*device_flags, device_name, st_device_supports_parameter_port); } } xmlXPathFreeObject(xpath); } /*! 
* \internal * \brief Retrieve fence agent meta-data asynchronously * * \param[in] agent Agent to execute * \param[in] timeout_sec Error if not complete within this time * \param[in] callback Function to call with result (this will always be * called, whether by this function directly or * later via the main loop, and on success the * metadata will be in its result argument's * action_stdout) * \param[in,out] user_data User data to pass to callback * * \return Standard Pacemaker return code * \note The caller must use a main loop. This function is not a * stonith_api_operations_t method because it does not need a stonith_t * object and does not go through the fencer, but executes the agent * directly. */ int stonith__metadata_async(const char *agent, int timeout_sec, void (*callback)(int pid, const pcmk__action_result_t *result, void *user_data), void *user_data) { switch (stonith_get_namespace(agent, NULL)) { case st_namespace_rhcs: { stonith_action_t *action = NULL; int rc = pcmk_ok; action = stonith__action_create(agent, PCMK_ACTION_METADATA, NULL, 0, timeout_sec, NULL, NULL, NULL); rc = stonith__execute_async(action, user_data, callback, NULL); if (rc != pcmk_ok) { callback(0, stonith__action_result(action), user_data); stonith__destroy_action(action); } return pcmk_legacy2rc(rc); } #if HAVE_STONITH_STONITH_H case st_namespace_lha: // LHA metadata is simply synthesized, so simulate async { pcmk__action_result_t result = { .exit_status = CRM_EX_OK, .execution_status = PCMK_EXEC_DONE, .exit_reason = NULL, .action_stdout = NULL, .action_stderr = NULL, }; stonith__lha_metadata(agent, timeout_sec, &result.action_stdout); callback(0, &result, user_data); pcmk__reset_result(&result); return pcmk_rc_ok; } #endif default: { pcmk__action_result_t result = { .exit_status = CRM_EX_NOSUCH, .execution_status = PCMK_EXEC_ERROR_HARD, .exit_reason = crm_strdup_printf("No such agent '%s'", agent), .action_stdout = NULL, .action_stderr = NULL, }; callback(0, &result, user_data); pcmk__reset_result(&result); return ENOENT; } } } /*! * \internal * \brief Return the exit status from an async action callback * * \param[in] data Callback data * * \return Exit status from callback data */ int stonith__exit_status(const stonith_callback_data_t *data) { if ((data == NULL) || (data->opaque == NULL)) { return CRM_EX_ERROR; } return ((pcmk__action_result_t *) data->opaque)->exit_status; } /*! * \internal * \brief Return the execution status from an async action callback * * \param[in] data Callback data * * \return Execution status from callback data */ int stonith__execution_status(const stonith_callback_data_t *data) { if ((data == NULL) || (data->opaque == NULL)) { return PCMK_EXEC_UNKNOWN; } return ((pcmk__action_result_t *) data->opaque)->execution_status; } /*! * \internal * \brief Return the exit reason from an async action callback * * \param[in] data Callback data * * \return Exit reason from callback data */ const char * stonith__exit_reason(const stonith_callback_data_t *data) { if ((data == NULL) || (data->opaque == NULL)) { return NULL; } return ((pcmk__action_result_t *) data->opaque)->exit_reason; } /*! * \internal * \brief Return the exit status from an event notification * * \param[in] event Event * * \return Exit status from event */ int stonith__event_exit_status(const stonith_event_t *event) { if ((event == NULL) || (event->opaque == NULL)) { return CRM_EX_ERROR; } else { struct event_private *event_private = event->opaque; return event_private->result.exit_status; } } /*! 
* \internal * \brief Return the execution status from an event notification * * \param[in] event Event * * \return Execution status from event */ int stonith__event_execution_status(const stonith_event_t *event) { if ((event == NULL) || (event->opaque == NULL)) { return PCMK_EXEC_UNKNOWN; } else { struct event_private *event_private = event->opaque; return event_private->result.execution_status; } } /*! * \internal * \brief Return the exit reason from an event notification * * \param[in] event Event * * \return Exit reason from event */ const char * stonith__event_exit_reason(const stonith_event_t *event) { if ((event == NULL) || (event->opaque == NULL)) { return NULL; } else { struct event_private *event_private = event->opaque; return event_private->result.exit_reason; } } /*! * \internal * \brief Return a human-friendly description of a fencing event * * \param[in] event Event to describe * * \return Newly allocated string with description of \p event * \note The caller is responsible for freeing the return value. * This function asserts on memory errors and never returns NULL. */ char * stonith__event_description(const stonith_event_t *event) { // Use somewhat readable defaults const char *origin = pcmk__s(event->client_origin, "a client"); const char *origin_node = pcmk__s(event->origin, "a node"); const char *executioner = pcmk__s(event->executioner, "the cluster"); const char *device = pcmk__s(event->device, "unknown"); const char *action = pcmk__s(event->action, event->operation); const char *target = pcmk__s(event->target, "no node"); const char *reason = stonith__event_exit_reason(event); const char *status; if (action == NULL) { action = "(unknown)"; } if (stonith__event_execution_status(event) != PCMK_EXEC_DONE) { status = pcmk_exec_status_str(stonith__event_execution_status(event)); } else if (stonith__event_exit_status(event) != CRM_EX_OK) { status = pcmk_exec_status_str(PCMK_EXEC_ERROR); } else { status = crm_exit_str(CRM_EX_OK); } if (pcmk__str_eq(event->operation, PCMK__VALUE_ST_NOTIFY_HISTORY, pcmk__str_none)) { return crm_strdup_printf("Fencing history may have changed"); } else if (pcmk__str_eq(event->operation, STONITH_OP_DEVICE_ADD, pcmk__str_none)) { return crm_strdup_printf("A fencing device (%s) was added", device); } else if (pcmk__str_eq(event->operation, STONITH_OP_DEVICE_DEL, pcmk__str_none)) { return crm_strdup_printf("A fencing device (%s) was removed", device); } else if (pcmk__str_eq(event->operation, STONITH_OP_LEVEL_ADD, pcmk__str_none)) { return crm_strdup_printf("A fencing topology level (%s) was added", device); } else if (pcmk__str_eq(event->operation, STONITH_OP_LEVEL_DEL, pcmk__str_none)) { return crm_strdup_printf("A fencing topology level (%s) was removed", device); } // event->operation should be PCMK__VALUE_ST_NOTIFY_FENCE at this point return crm_strdup_printf("Operation %s of %s by %s for %s@%s: %s%s%s%s (ref=%s)", action, target, executioner, origin, origin_node, status, ((reason == NULL)? "" : " ("), pcmk__s(reason, ""), ((reason == NULL)? "" : ")"), pcmk__s(event->id, "(none)")); } diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c index 2adec7f17a..71b9fb0e02 100644 --- a/lib/lrmd/lrmd_client.c +++ b/lib/lrmd/lrmd_client.c @@ -1,2672 +1,2672 @@ /* * Copyright 2012-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include // uint32_t, uint64_t #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // stonith__* #include #include #include #include #include #include #define MAX_TLS_RECV_WAIT 10000 CRM_TRACE_INIT_DATA(lrmd); static int lrmd_api_disconnect(lrmd_t * lrmd); static int lrmd_api_is_connected(lrmd_t * lrmd); /* IPC proxy functions */ int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); static void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg); void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); // GnuTLS client handshake timeout in seconds #define TLS_HANDSHAKE_TIMEOUT 5 static void lrmd_tls_disconnect(lrmd_t * lrmd); static int global_remote_msg_id = 0; static void lrmd_tls_connection_destroy(gpointer userdata); static int add_tls_to_mainloop(lrmd_t *lrmd, bool do_api_handshake); typedef struct lrmd_private_s { uint64_t type; char *token; mainloop_io_t *source; /* IPC parameters */ crm_ipc_t *ipc; pcmk__remote_t *remote; /* Extra TLS parameters */ char *remote_nodename; char *server; int port; pcmk__tls_t *tls; /* while the async connection is occurring, this is the id * of the connection timeout timer. */ int async_timer; int sock; /* since tls requires a round trip across the network for a * request/reply, there are times where we just want to be able * to send a request from the client and not wait around (or even care * about) what the reply is. */ int expected_late_replies; GList *pending_notify; crm_trigger_t *process_notify; crm_trigger_t *handshake_trigger; lrmd_event_callback callback; /* Internal IPC proxy msg passing for remote guests */ void (*proxy_callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg); void *proxy_callback_userdata; char *peer_version; } lrmd_private_t; static int process_lrmd_handshake_reply(xmlNode *reply, lrmd_private_t *native); static void report_async_connection_result(lrmd_t * lrmd, int rc); static lrmd_list_t * lrmd_list_add(lrmd_list_t * head, const char *value) { lrmd_list_t *p, *end; p = pcmk__assert_alloc(1, sizeof(lrmd_list_t)); p->val = strdup(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void lrmd_list_freeall(lrmd_list_t * head) { lrmd_list_t *p; while (head) { char *val = (char *)head->val; p = head->next; free(val); free(head); head = p; } } lrmd_key_value_t * lrmd_key_value_add(lrmd_key_value_t * head, const char *key, const char *value) { lrmd_key_value_t *p, *end; p = pcmk__assert_alloc(1, sizeof(lrmd_key_value_t)); p->key = strdup(key); p->value = strdup(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void lrmd_key_value_freeall(lrmd_key_value_t * head) { lrmd_key_value_t *p; while (head) { p = head->next; free(head->key); free(head->value); free(head); head = p; } } /*! * \brief Create a new lrmd_event_data_t object * * \param[in] rsc_id ID of resource involved in event * \param[in] task Action name * \param[in] interval_ms Action interval * * \return Newly allocated and initialized lrmd_event_data_t * \note This functions asserts on memory errors, so the return value is * guaranteed to be non-NULL. The caller is responsible for freeing the * result with lrmd_free_event(). 
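 *
 * A minimal usage sketch (illustrative only; the resource ID and action
 * name are hypothetical):
 *
 *   lrmd_event_data_t *event = lrmd_new_event("example-rsc", "monitor",
 *                                             10000);  // interval in ms
 *
 *   // ... populate or inspect members as needed ...
 *
 *   lrmd_free_event(event);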
*/ lrmd_event_data_t * lrmd_new_event(const char *rsc_id, const char *task, guint interval_ms) { lrmd_event_data_t *event = pcmk__assert_alloc(1, sizeof(lrmd_event_data_t)); // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees event->rsc_id = pcmk__str_copy(rsc_id); event->op_type = pcmk__str_copy(task); event->interval_ms = interval_ms; return event; } lrmd_event_data_t * lrmd_copy_event(lrmd_event_data_t * event) { lrmd_event_data_t *copy = NULL; copy = pcmk__assert_alloc(1, sizeof(lrmd_event_data_t)); copy->type = event->type; // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees copy->rsc_id = pcmk__str_copy(event->rsc_id); copy->op_type = pcmk__str_copy(event->op_type); copy->user_data = pcmk__str_copy(event->user_data); copy->output = pcmk__str_copy(event->output); copy->remote_nodename = pcmk__str_copy(event->remote_nodename); copy->exit_reason = pcmk__str_copy(event->exit_reason); copy->call_id = event->call_id; copy->timeout = event->timeout; copy->interval_ms = event->interval_ms; copy->start_delay = event->start_delay; copy->rsc_deleted = event->rsc_deleted; copy->rc = event->rc; copy->op_status = event->op_status; copy->t_run = event->t_run; copy->t_rcchange = event->t_rcchange; copy->exec_time = event->exec_time; copy->queue_time = event->queue_time; copy->connection_rc = event->connection_rc; copy->params = pcmk__str_table_dup(event->params); return copy; } /*! * \brief Free an executor event * * \param[in,out] Executor event object to free */ void lrmd_free_event(lrmd_event_data_t *event) { if (event == NULL) { return; } // @TODO Why are these const char *? free((void *) event->rsc_id); free((void *) event->op_type); free((void *) event->user_data); free((void *) event->remote_nodename); lrmd__reset_result(event); if (event->params != NULL) { g_hash_table_destroy(event->params); } free(event); } static void lrmd_dispatch_internal(gpointer data, gpointer user_data) { xmlNode *msg = data; lrmd_t *lrmd = user_data; const char *type; const char *proxy_session = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_SESSION); lrmd_private_t *native = lrmd->lrmd_private; lrmd_event_data_t event = { 0, }; if (proxy_session != NULL) { /* this is proxy business */ lrmd_internal_proxy_dispatch(lrmd, msg); return; } else if (!native->callback) { /* no callback set */ crm_trace("notify event received but client has not set callback"); return; } event.remote_nodename = native->remote_nodename; type = pcmk__xe_get(msg, PCMK__XA_LRMD_OP); pcmk__xe_get_int(msg, PCMK__XA_LRMD_CALLID, &event.call_id); event.rsc_id = pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_ID); if (pcmk__str_eq(type, LRMD_OP_RSC_REG, pcmk__str_none)) { event.type = lrmd_event_register; } else if (pcmk__str_eq(type, LRMD_OP_RSC_UNREG, pcmk__str_none)) { event.type = lrmd_event_unregister; } else if (pcmk__str_eq(type, LRMD_OP_RSC_EXEC, pcmk__str_none)) { int rc = 0; int exec_time = 0; int queue_time = 0; pcmk__xe_get_int(msg, PCMK__XA_LRMD_TIMEOUT, &event.timeout); pcmk__xe_get_guint(msg, PCMK__XA_LRMD_RSC_INTERVAL, &event.interval_ms); pcmk__xe_get_int(msg, PCMK__XA_LRMD_RSC_START_DELAY, &event.start_delay); pcmk__xe_get_int(msg, PCMK__XA_LRMD_EXEC_RC, &rc); event.rc = (enum ocf_exitcode) rc; pcmk__xe_get_int(msg, PCMK__XA_LRMD_EXEC_OP_STATUS, &event.op_status); pcmk__xe_get_int(msg, PCMK__XA_LRMD_RSC_DELETED, &event.rsc_deleted); pcmk__xe_get_time(msg, PCMK__XA_LRMD_RUN_TIME, &event.t_run); pcmk__xe_get_time(msg, PCMK__XA_LRMD_RCCHANGE_TIME, &event.t_rcchange); pcmk__xe_get_int(msg, 
PCMK__XA_LRMD_EXEC_TIME, &exec_time); CRM_LOG_ASSERT(exec_time >= 0); event.exec_time = QB_MAX(0, exec_time); pcmk__xe_get_int(msg, PCMK__XA_LRMD_QUEUE_TIME, &queue_time); CRM_LOG_ASSERT(queue_time >= 0); event.queue_time = QB_MAX(0, queue_time); event.op_type = pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_ACTION); event.user_data = pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_USERDATA_STR); event.type = lrmd_event_exec_complete; /* output and exit_reason may be freed by a callback */ - event.output = crm_element_value_copy(msg, PCMK__XA_LRMD_RSC_OUTPUT); + event.output = pcmk__xe_get_copy(msg, PCMK__XA_LRMD_RSC_OUTPUT); lrmd__set_result(&event, event.rc, event.op_status, pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_EXIT_REASON)); event.params = xml2list(msg); } else if (pcmk__str_eq(type, LRMD_OP_NEW_CLIENT, pcmk__str_none)) { event.type = lrmd_event_new_client; } else if (pcmk__str_eq(type, LRMD_OP_POKE, pcmk__str_none)) { event.type = lrmd_event_poke; } else { return; } crm_trace("op %s notify event received", type); native->callback(&event); if (event.params) { g_hash_table_destroy(event.params); } lrmd__reset_result(&event); } // \return Always 0, to indicate that IPC mainloop source should be kept static int lrmd_ipc_dispatch(const char *buffer, ssize_t length, gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; if (native->callback != NULL) { xmlNode *msg = pcmk__xml_parse(buffer); lrmd_dispatch_internal(msg, lrmd); pcmk__xml_free(msg); } return 0; } static void lrmd_free_xml(gpointer userdata) { pcmk__xml_free((xmlNode *) userdata); } static bool remote_executor_connected(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; return (native->remote->tls_session != NULL); } static void handle_remote_msg(xmlNode *xml, lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; const char *msg_type = NULL; msg_type = pcmk__xe_get(xml, PCMK__XA_LRMD_REMOTE_MSG_TYPE); if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) { lrmd_dispatch_internal(xml, lrmd); } else if (pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) { const char *op = pcmk__xe_get(xml, PCMK__XA_LRMD_OP); if (native->expected_late_replies > 0) { native->expected_late_replies--; /* The register op message we get as a response to lrmd_handshake_async * is a reply, so we have to handle that here. */ if (pcmk__str_eq(op, "register", pcmk__str_casei)) { int rc = process_lrmd_handshake_reply(xml, native); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); } } else { int reply_id = 0; pcmk__xe_get_int(xml, PCMK__XA_LRMD_CALLID, &reply_id); /* if this happens, we want to know about it */ crm_err("Got outdated Pacemaker Remote reply %d", reply_id); } } } /*! * \internal * \brief Notify trigger handler * * \param[in,out] userdata API connection * * \return Always return G_SOURCE_CONTINUE to leave this trigger handler in the * mainloop */ static int process_pending_notifies(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; if (native->pending_notify == NULL) { return G_SOURCE_CONTINUE; } crm_trace("Processing pending notifies"); g_list_foreach(native->pending_notify, lrmd_dispatch_internal, lrmd); g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; return G_SOURCE_CONTINUE; } /*! 
* \internal * \brief TLS dispatch function for file descriptor sources * * \param[in,out] userdata API connection * * \return -1 on error to remove the source from the mainloop, or 0 otherwise * to leave it in the mainloop */ static int lrmd_tls_dispatch(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *xml = NULL; int rc = pcmk_rc_ok; if (!remote_executor_connected(lrmd)) { crm_trace("TLS dispatch triggered after disconnect"); return -1; } crm_trace("TLS dispatch triggered"); rc = pcmk__remote_ready(native->remote, 0); if (rc == pcmk_rc_ok) { rc = pcmk__read_remote_message(native->remote, -1); } if (rc != pcmk_rc_ok && rc != ETIME) { crm_info("Lost %s executor connection while reading data", (native->remote_nodename? native->remote_nodename : "local")); lrmd_tls_disconnect(lrmd); return -1; } /* If rc is ETIME, there was nothing to read but we may already have a * full message in the buffer */ xml = pcmk__remote_message_xml(native->remote); if (xml == NULL) { return 0; } handle_remote_msg(xml, lrmd); pcmk__xml_free(xml); return 0; } /* Not used with mainloop */ int lrmd_poll(lrmd_t * lrmd, int timeout) { lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: return crm_ipc_ready(native->ipc); case pcmk__client_tls: if (native->pending_notify) { return 1; } else { int rc = pcmk__remote_ready(native->remote, 0); switch (rc) { case pcmk_rc_ok: return 1; case ETIME: return 0; default: return pcmk_rc2legacy(rc); } } default: crm_err("Unsupported executor connection type (bug?): %d", native->type); return -EPROTONOSUPPORT; } } /* Not used with mainloop */ bool lrmd_dispatch(lrmd_t * lrmd) { lrmd_private_t *private = NULL; pcmk__assert(lrmd != NULL); private = lrmd->lrmd_private; switch (private->type) { case pcmk__client_ipc: while (crm_ipc_ready(private->ipc)) { if (crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); lrmd_ipc_dispatch(msg, strlen(msg), lrmd); } } break; case pcmk__client_tls: lrmd_tls_dispatch(lrmd); break; default: crm_err("Unsupported executor connection type (bug?): %d", private->type); } if (lrmd_api_is_connected(lrmd) == FALSE) { crm_err("Connection closed"); return FALSE; } return TRUE; } static xmlNode * lrmd_create_op(const char *token, const char *op, xmlNode *data, int timeout, enum lrmd_call_options options) { xmlNode *op_msg = NULL; CRM_CHECK(token != NULL, return NULL); op_msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_COMMAND); crm_xml_add(op_msg, PCMK__XA_T, PCMK__VALUE_LRMD); crm_xml_add(op_msg, PCMK__XA_LRMD_OP, op); crm_xml_add_int(op_msg, PCMK__XA_LRMD_TIMEOUT, timeout); crm_xml_add_int(op_msg, PCMK__XA_LRMD_CALLOPT, options); if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(op_msg, PCMK__XE_LRMD_CALLDATA); pcmk__xml_copy(wrapper, data); } crm_trace("Created executor %s command with call options %.8lx (%d)", op, (long)options, options); return op_msg; } static void lrmd_ipc_connection_destroy(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: crm_info("Disconnected from local executor"); break; case pcmk__client_tls: crm_info("Disconnected from remote executor on %s", native->remote_nodename); break; default: crm_err("Unsupported executor connection type %d (bug?)", native->type); } /* Prevent these from being cleaned up in lrmd_api_disconnect() */ native->ipc = NULL; native->source = NULL; if (native->callback) { lrmd_event_data_t event = { 0, }; 
event.type = lrmd_event_disconnect; event.remote_nodename = native->remote_nodename; native->callback(&event); } } static void lrmd_tls_connection_destroy(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; crm_info("TLS connection destroyed"); if (native->remote->tls_session) { gnutls_bye(native->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(native->remote->tls_session); native->remote->tls_session = NULL; } if (native->tls) { pcmk__free_tls(native->tls); native->tls = NULL; } if (native->sock >= 0) { close(native->sock); } if (native->process_notify) { mainloop_destroy_trigger(native->process_notify); native->process_notify = NULL; } if (native->pending_notify) { g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; } if (native->handshake_trigger != NULL) { mainloop_destroy_trigger(native->handshake_trigger); native->handshake_trigger = NULL; } free(native->remote->buffer); free(native->remote->start_state); native->remote->buffer = NULL; native->remote->start_state = NULL; native->source = 0; native->sock = -1; if (native->callback) { lrmd_event_data_t event = { 0, }; event.remote_nodename = native->remote_nodename; event.type = lrmd_event_disconnect; native->callback(&event); } return; } // \return Standard Pacemaker return code int lrmd__remote_send_xml(pcmk__remote_t *session, xmlNode *msg, uint32_t id, const char *msg_type) { crm_xml_add_int(msg, PCMK__XA_LRMD_REMOTE_MSG_ID, id); crm_xml_add(msg, PCMK__XA_LRMD_REMOTE_MSG_TYPE, msg_type); return pcmk__remote_send_xml(session, msg); } // \return Standard Pacemaker return code static int read_remote_reply(lrmd_t *lrmd, int total_timeout, int expected_reply_id, xmlNode **reply) { lrmd_private_t *native = lrmd->lrmd_private; time_t start = time(NULL); const char *msg_type = NULL; int reply_id = 0; int remaining_timeout = 0; int rc = pcmk_rc_ok; /* A timeout of 0 here makes no sense. We have to wait a period of time * for the response to come back. If -1 or 0, default to 10 seconds. */ if (total_timeout <= 0 || total_timeout > MAX_TLS_RECV_WAIT) { total_timeout = MAX_TLS_RECV_WAIT; } for (*reply = NULL; *reply == NULL; ) { *reply = pcmk__remote_message_xml(native->remote); if (*reply == NULL) { /* read some more off the tls buffer if we still have time left. 
*/ if (remaining_timeout) { remaining_timeout = total_timeout - ((time(NULL) - start) * 1000); } else { remaining_timeout = total_timeout; } if (remaining_timeout <= 0) { return ETIME; } rc = pcmk__read_remote_message(native->remote, remaining_timeout); if (rc != pcmk_rc_ok) { return rc; } *reply = pcmk__remote_message_xml(native->remote); if (*reply == NULL) { return ENOMSG; } } pcmk__xe_get_int(*reply, PCMK__XA_LRMD_REMOTE_MSG_ID, &reply_id); msg_type = pcmk__xe_get(*reply, PCMK__XA_LRMD_REMOTE_MSG_TYPE); if (!msg_type) { crm_err("Empty msg type received while waiting for reply"); pcmk__xml_free(*reply); *reply = NULL; } else if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) { /* got a notify while waiting for reply, trigger the notify to be processed later */ crm_info("queueing notify"); native->pending_notify = g_list_append(native->pending_notify, *reply); if (native->process_notify) { crm_info("notify trigger set."); mainloop_set_trigger(native->process_notify); } *reply = NULL; } else if (!pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) { /* msg isn't a reply, make some noise */ crm_err("Expected a reply, got %s", msg_type); pcmk__xml_free(*reply); *reply = NULL; } else if (reply_id != expected_reply_id) { if (native->expected_late_replies > 0) { native->expected_late_replies--; } else { crm_err("Got outdated reply, expected id %d got id %d", expected_reply_id, reply_id); } pcmk__xml_free(*reply); *reply = NULL; } } if (native->remote->buffer && native->process_notify) { mainloop_set_trigger(native->process_notify); } return rc; } // \return Standard Pacemaker return code static int send_remote_message(lrmd_t *lrmd, xmlNode *msg) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; global_remote_msg_id++; if (global_remote_msg_id <= 0) { global_remote_msg_id = 1; } rc = lrmd__remote_send_xml(native->remote, msg, global_remote_msg_id, "request"); if (rc != pcmk_rc_ok) { crm_err("Disconnecting because TLS message could not be sent to " "Pacemaker Remote: %s", pcmk_rc_str(rc)); lrmd_tls_disconnect(lrmd); } return rc; } static int lrmd_tls_send_recv(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply) { int rc = 0; xmlNode *xml = NULL; if (!remote_executor_connected(lrmd)) { return -ENOTCONN; } rc = send_remote_message(lrmd, msg); if (rc != pcmk_rc_ok) { return pcmk_rc2legacy(rc); } rc = read_remote_reply(lrmd, timeout, global_remote_msg_id, &xml); if (rc != pcmk_rc_ok) { crm_err("Disconnecting remote after request %d reply not received: %s " QB_XS " rc=%d timeout=%dms", global_remote_msg_id, pcmk_rc_str(rc), rc, timeout); lrmd_tls_disconnect(lrmd); } if (reply) { *reply = xml; } else { pcmk__xml_free(xml); } return pcmk_rc2legacy(rc); } static int lrmd_send_xml(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: rc = crm_ipc_send(native->ipc, msg, crm_ipc_client_response, timeout, reply); break; case pcmk__client_tls: rc = lrmd_tls_send_recv(lrmd, msg, timeout, reply); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } return rc; } static int lrmd_send_xml_no_reply(lrmd_t * lrmd, xmlNode * msg) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: rc = crm_ipc_send(native->ipc, msg, crm_ipc_flags_none, 0, NULL); break; case pcmk__client_tls: rc = send_remote_message(lrmd, msg); if (rc == pcmk_rc_ok) { /* we don't want 
to wait around for the reply, but * since the request/reply protocol needs to behave the same * as libqb, a reply will eventually come later anyway. */ native->expected_late_replies++; } rc = pcmk_rc2legacy(rc); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } return rc; } static int lrmd_api_is_connected(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: return crm_ipc_connected(native->ipc); case pcmk__client_tls: return remote_executor_connected(lrmd); default: crm_err("Unsupported executor connection type (bug?): %d", native->type); return 0; } } /*! * \internal * \brief Send a prepared API command to the executor * * \param[in,out] lrmd Existing connection to the executor * \param[in] op Name of API command to send * \param[in] data Command data XML to add to the sent command * \param[out] output_data If expecting a reply, it will be stored here * \param[in] timeout Timeout in milliseconds (if 0, defaults to * a sensible value per the type of connection, * standard vs. pacemaker remote); * also propagated to the command XML * \param[in] call_options Call options to pass to server when sending * \param[in] expect_reply If true, wait for a reply from the server; * must be true for IPC (as opposed to TLS) clients * * \return pcmk_ok on success, -errno on error */ static int lrmd_send_command(lrmd_t *lrmd, const char *op, xmlNode *data, xmlNode **output_data, int timeout, enum lrmd_call_options options, bool expect_reply) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; if (!lrmd_api_is_connected(lrmd)) { return -ENOTCONN; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } CRM_LOG_ASSERT(native->token != NULL); crm_trace("Sending %s op to executor", op); op_msg = lrmd_create_op(native->token, op, data, timeout, options); if (op_msg == NULL) { return -EINVAL; } if (expect_reply) { rc = lrmd_send_xml(lrmd, op_msg, timeout, &op_reply); } else { rc = lrmd_send_xml_no_reply(lrmd, op_msg); goto done; } if (rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%d): %d", op, timeout, rc); goto done; } else if (op_reply == NULL) { rc = -ENOMSG; goto done; } rc = pcmk_ok; crm_trace("%s op reply received", op); if (pcmk__xe_get_int(op_reply, PCMK__XA_LRMD_RC, &rc) != pcmk_rc_ok) { rc = -ENOMSG; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (output_data) { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } done: if (lrmd_api_is_connected(lrmd) == FALSE) { crm_err("Executor disconnected"); } pcmk__xml_free(op_msg); pcmk__xml_free(op_reply); return rc; } static int lrmd_api_poke_connection(lrmd_t * lrmd) { int rc; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); rc = lrmd_send_command(lrmd, LRMD_OP_POKE, data, NULL, 0, 0, (native->type == pcmk__client_ipc)); pcmk__xml_free(data); return rc < 0 ? 
rc : pcmk_ok; } // \return Standard Pacemaker return code int lrmd__validate_remote_settings(lrmd_t *lrmd, GHashTable *hash) { int rc = pcmk_rc_ok; const char *value; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *data = pcmk__xe_create(NULL, PCMK__XA_LRMD_OP); crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); value = g_hash_table_lookup(hash, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT); if ((value) && (stonith__watchdog_fencing_enabled_for_node(native->remote_nodename))) { crm_xml_add(data, PCMK__XA_LRMD_WATCHDOG, value); } rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0, (native->type == pcmk__client_ipc)); pcmk__xml_free(data); return (rc < 0)? pcmk_legacy2rc(rc) : pcmk_rc_ok; } static xmlNode * lrmd_handshake_hello_msg(const char *name, bool is_proxy) { xmlNode *hello = pcmk__xe_create(NULL, PCMK__XE_LRMD_COMMAND); crm_xml_add(hello, PCMK__XA_T, PCMK__VALUE_LRMD); crm_xml_add(hello, PCMK__XA_LRMD_OP, CRM_OP_REGISTER); crm_xml_add(hello, PCMK__XA_LRMD_CLIENTNAME, name); crm_xml_add(hello, PCMK__XA_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); /* advertise that we are a proxy provider */ if (is_proxy) { pcmk__xe_set_bool_attr(hello, PCMK__XA_LRMD_IS_IPC_PROVIDER, true); } return hello; } static int process_lrmd_handshake_reply(xmlNode *reply, lrmd_private_t *native) { int rc = pcmk_rc_ok; const char *version = pcmk__xe_get(reply, PCMK__XA_LRMD_PROTOCOL_VERSION); const char *msg_type = pcmk__xe_get(reply, PCMK__XA_LRMD_OP); const char *tmp_ticket = pcmk__xe_get(reply, PCMK__XA_LRMD_CLIENTID); const char *start_state = pcmk__xe_get(reply, PCMK__XA_NODE_START_STATE); pcmk__xe_get_int(reply, PCMK__XA_LRMD_RC, &rc); rc = pcmk_legacy2rc(rc); /* The remote executor may add its uptime to the XML reply, which is useful * in handling transient attributes when the connection to the remote node * unexpectedly drops. If no parameter is given, just default to -1. 
*/ native->remote->uptime = -1; pcmk__xe_get_time(reply, PCMK__XA_UPTIME, &native->remote->uptime); if (start_state) { native->remote->start_state = strdup(start_state); } if (rc == EPROTO) { crm_err("Executor protocol version mismatch between client (%s) and server (%s)", LRMD_PROTOCOL_VERSION, version); crm_log_xml_err(reply, "Protocol Error"); } else if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) { crm_err("Invalid registration message: %s", msg_type); crm_log_xml_err(reply, "Bad reply"); rc = EPROTO; } else if (tmp_ticket == NULL) { crm_err("No registration token provided"); crm_log_xml_err(reply, "Bad reply"); rc = EPROTO; } else { crm_trace("Obtained registration token: %s", tmp_ticket); native->token = strdup(tmp_ticket); native->peer_version = strdup(version?version:"1.0"); /* Included since 1.1 */ rc = pcmk_rc_ok; } return rc; } static int lrmd_handshake(lrmd_t * lrmd, const char *name) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *reply = NULL; xmlNode *hello = lrmd_handshake_hello_msg(name, native->proxy_callback != NULL); rc = lrmd_send_xml(lrmd, hello, -1, &reply); if (rc < 0) { crm_perror(LOG_DEBUG, "Couldn't complete registration with the executor API: %d", rc); rc = ECOMM; } else if (reply == NULL) { crm_err("Did not receive registration reply"); rc = EPROTO; } else { rc = process_lrmd_handshake_reply(reply, native); } pcmk__xml_free(reply); pcmk__xml_free(hello); if (rc != pcmk_rc_ok) { lrmd_api_disconnect(lrmd); } return rc; } static int lrmd_handshake_async(lrmd_t * lrmd, const char *name) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *hello = lrmd_handshake_hello_msg(name, native->proxy_callback != NULL); rc = send_remote_message(lrmd, hello); if (rc == pcmk_rc_ok) { native->expected_late_replies++; } else { lrmd_api_disconnect(lrmd); } pcmk__xml_free(hello); return rc; } static int lrmd_ipc_connect(lrmd_t * lrmd, int *fd) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; struct ipc_client_callbacks lrmd_callbacks = { .dispatch = lrmd_ipc_dispatch, .destroy = lrmd_ipc_connection_destroy }; crm_info("Connecting to executor"); if (fd) { /* No mainloop */ native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0); if (native->ipc != NULL) { rc = pcmk__connect_generic_ipc(native->ipc); if (rc == pcmk_rc_ok) { rc = pcmk__ipc_fd(native->ipc, fd); } if (rc != pcmk_rc_ok) { crm_err("Connection to executor failed: %s", pcmk_rc_str(rc)); rc = -ENOTCONN; } } } else { native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { crm_debug("Could not connect to the executor API"); rc = -ENOTCONN; } return rc; } static void copy_gnutls_datum(gnutls_datum_t *dest, gnutls_datum_t *source) { pcmk__assert((dest != NULL) && (source != NULL) && (source->data != NULL)); dest->data = gnutls_malloc(source->size); pcmk__mem_assert(dest->data); memcpy(dest->data, source->data, source->size); dest->size = source->size; } static void clear_gnutls_datum(gnutls_datum_t *datum) { gnutls_free(datum->data); datum->data = NULL; datum->size = 0; } #define KEY_READ_LEN 256 // Chunk size for reading key from file // \return Standard Pacemaker return code static int read_gnutls_key(const char *location, gnutls_datum_t *key) { FILE *stream = NULL; size_t buf_len = KEY_READ_LEN; if ((location == NULL) || (key == NULL)) { return EINVAL; } stream = fopen(location, "r"); if (stream == NULL) { return errno; } 
key->data = gnutls_malloc(buf_len); key->size = 0; while (!feof(stream)) { int next = fgetc(stream); if (next == EOF) { if (!feof(stream)) { crm_warn("Pacemaker Remote key read was partially successful " "(copy in memory may be corrupted)"); } break; } if (key->size == buf_len) { buf_len = key->size + KEY_READ_LEN; key->data = gnutls_realloc(key->data, buf_len); pcmk__assert(key->data); } key->data[key->size++] = (unsigned char) next; } fclose(stream); if (key->size == 0) { clear_gnutls_datum(key); return ENOKEY; } return pcmk_rc_ok; } // Cache the most recently used Pacemaker Remote authentication key struct key_cache_s { time_t updated; // When cached key was read (valid for 1 minute) const char *location; // Where cached key was read from gnutls_datum_t key; // Cached key }; static bool key_is_cached(struct key_cache_s *key_cache) { return key_cache->updated != 0; } static bool key_cache_expired(struct key_cache_s *key_cache) { return (time(NULL) - key_cache->updated) >= 60; } static void clear_key_cache(struct key_cache_s *key_cache) { clear_gnutls_datum(&(key_cache->key)); if ((key_cache->updated != 0) || (key_cache->location != NULL)) { key_cache->updated = 0; key_cache->location = NULL; crm_debug("Cleared Pacemaker Remote key cache"); } } static void get_cached_key(struct key_cache_s *key_cache, gnutls_datum_t *key) { copy_gnutls_datum(key, &(key_cache->key)); crm_debug("Using cached Pacemaker Remote key from %s", pcmk__s(key_cache->location, "unknown location")); } static void cache_key(struct key_cache_s *key_cache, gnutls_datum_t *key, const char *location) { key_cache->updated = time(NULL); key_cache->location = location; copy_gnutls_datum(&(key_cache->key), key); crm_debug("Using (and cacheing) Pacemaker Remote key from %s", pcmk__s(location, "unknown location")); } /*! * \internal * \brief Get Pacemaker Remote authentication key from file or cache * * \param[in] location Path to key file to try (this memory must * persist across all calls of this function) * \param[out] key Key from location or cache * * \return Standard Pacemaker return code */ static int get_remote_key(const char *location, gnutls_datum_t *key) { static struct key_cache_s key_cache = { 0, }; int rc = pcmk_rc_ok; if ((location == NULL) || (key == NULL)) { return EINVAL; } if (key_is_cached(&key_cache)) { if (key_cache_expired(&key_cache)) { clear_key_cache(&key_cache); } else { get_cached_key(&key_cache, key); return pcmk_rc_ok; } } rc = read_gnutls_key(location, key); if (rc != pcmk_rc_ok) { return rc; } cache_key(&key_cache, key, location); return pcmk_rc_ok; } /*! 
* \internal * \brief Initialize the Pacemaker Remote authentication key * * Try loading the Pacemaker Remote authentication key from cache if available, * otherwise from these locations, in order of preference: * * - The value of the PCMK_authkey_location environment variable, if set * - The Pacemaker default key file location * * \param[out] key Where to store key * * \return Standard Pacemaker return code */ int lrmd__init_remote_key(gnutls_datum_t *key) { static const char *env_location = NULL; static bool need_env = true; int rc = pcmk_rc_ok; if (need_env) { env_location = pcmk__env_option(PCMK__ENV_AUTHKEY_LOCATION); need_env = false; } // Try location in environment variable, if set if (env_location != NULL) { rc = get_remote_key(env_location, key); if (rc == pcmk_rc_ok) { return pcmk_rc_ok; } crm_warn("Could not read Pacemaker Remote key from %s: %s", env_location, pcmk_rc_str(rc)); return ENOKEY; } // Try default location, if environment wasn't explicitly set to it rc = get_remote_key(DEFAULT_REMOTE_KEY_LOCATION, key); if (rc == pcmk_rc_ok) { return pcmk_rc_ok; } crm_warn("Could not read Pacemaker Remote key from default location %s: %s", DEFAULT_REMOTE_KEY_LOCATION, pcmk_rc_str(rc)); return ENOKEY; } static void report_async_connection_result(lrmd_t * lrmd, int rc) { lrmd_private_t *native = lrmd->lrmd_private; if (native->callback) { lrmd_event_data_t event = { 0, }; event.type = lrmd_event_connect; event.remote_nodename = native->remote_nodename; event.connection_rc = rc; native->callback(&event); } } static void tls_handshake_failed(lrmd_t *lrmd, int tls_rc, int rc) { lrmd_private_t *native = lrmd->lrmd_private; crm_warn("Disconnecting after TLS handshake with " "Pacemaker Remote server %s:%d failed: %s", native->server, native->port, (rc == EPROTO)? gnutls_strerror(tls_rc) : pcmk_rc_str(rc)); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); gnutls_deinit(native->remote->tls_session); native->remote->tls_session = NULL; lrmd_tls_connection_destroy(lrmd); } static void tls_handshake_succeeded(lrmd_t *lrmd) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; /* Now that the handshake is done, see if any client TLS certificate is * close to its expiration date and log if so. If a TLS certificate is not * in use, this function will just return so we don't need to check for the * session type here. */ pcmk__tls_check_cert_expiration(native->remote->tls_session); crm_info("TLS connection to Pacemaker Remote server %s:%d succeeded", native->server, native->port); rc = add_tls_to_mainloop(lrmd, true); /* If add_tls_to_mainloop failed, report that right now. Otherwise, we have * to wait until we read the async reply to report anything. */ if (rc != pcmk_rc_ok) { report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); } } /*! * \internal * \brief Perform a TLS client handshake with a Pacemaker Remote server * * \param[in] lrmd Newly established Pacemaker Remote executor connection * * \return Standard Pacemaker return code */ static int tls_client_handshake(lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; int tls_rc = GNUTLS_E_SUCCESS; int rc = pcmk__tls_client_handshake(native->remote, TLS_HANDSHAKE_TIMEOUT, &tls_rc); if (rc != pcmk_rc_ok) { tls_handshake_failed(lrmd, tls_rc, rc); } return rc; } /*! 
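 * Usage sketch (editor's illustration, not part of this change set): the
 * caller-side pattern for loading the PSK key, mirroring the connect paths
 * further below. The 'tls' handle is assumed to have been created with
 * pcmk__init_tls() already.
 *
 * \code
 * gnutls_datum_t psk_key = { NULL, 0 };
 *
 * if (lrmd__init_remote_key(&psk_key) == pcmk_rc_ok) {
 *     pcmk__tls_add_psk_key(tls, &psk_key);
 *     gnutls_free(psk_key.data);   // freed right away, as in the connect paths below
 * }
 * \endcode
 */

/*!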
* \internal * \brief Add trigger and file descriptor mainloop sources for TLS * * \param[in,out] lrmd API connection with established TLS session * \param[in] do_api_handshake Whether to perform executor handshake * * \return Standard Pacemaker return code */ static int add_tls_to_mainloop(lrmd_t *lrmd, bool do_api_handshake) { lrmd_private_t *native = lrmd->lrmd_private; int rc = pcmk_rc_ok; char *name = crm_strdup_printf("pacemaker-remote-%s:%d", native->server, native->port); struct mainloop_fd_callbacks tls_fd_callbacks = { .dispatch = lrmd_tls_dispatch, .destroy = lrmd_tls_connection_destroy, }; native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH, process_pending_notifies, lrmd); native->source = mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd, &tls_fd_callbacks); /* Async connections lose the client name provided by the API caller, so we * have to use our generated name here to perform the executor handshake. * * @TODO Keep track of the caller-provided name. Perhaps we should be using * that name in this function instead of generating one anyway. */ if (do_api_handshake) { rc = lrmd_handshake_async(lrmd, name); } free(name); return rc; } struct handshake_data_s { lrmd_t *lrmd; time_t start_time; int timeout_sec; }; static gboolean try_handshake_cb(gpointer user_data) { struct handshake_data_s *hs = user_data; lrmd_t *lrmd = hs->lrmd; lrmd_private_t *native = lrmd->lrmd_private; pcmk__remote_t *remote = native->remote; int rc = pcmk_rc_ok; int tls_rc = GNUTLS_E_SUCCESS; if (time(NULL) >= hs->start_time + hs->timeout_sec) { rc = ETIME; tls_handshake_failed(lrmd, GNUTLS_E_TIMEDOUT, rc); free(hs); return 0; } rc = pcmk__tls_client_try_handshake(remote, &tls_rc); if (rc == pcmk_rc_ok) { tls_handshake_succeeded(lrmd); free(hs); return 0; } else if (rc == EAGAIN) { mainloop_set_trigger(native->handshake_trigger); return 1; } else { rc = EKEYREJECTED; tls_handshake_failed(lrmd, tls_rc, rc); free(hs); return 0; } } static void lrmd_tcp_connect_cb(void *userdata, int rc, int sock) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; int tls_rc = GNUTLS_E_SUCCESS; bool use_cert = pcmk__x509_enabled(); native->async_timer = 0; if (rc != pcmk_rc_ok) { lrmd_tls_connection_destroy(lrmd); crm_info("Could not connect to Pacemaker Remote at %s:%d: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); return; } /* The TCP connection was successful, so establish the TLS connection. */ native->sock = sock; if (native->tls == NULL) { rc = pcmk__init_tls(&native->tls, false, use_cert ? 
GNUTLS_CRD_CERTIFICATE : GNUTLS_CRD_PSK); if (rc != pcmk_rc_ok) { lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); return; } } if (!use_cert) { gnutls_datum_t psk_key = { NULL, 0 }; rc = lrmd__init_remote_key(&psk_key); if (rc != pcmk_rc_ok) { crm_info("Could not connect to Pacemaker Remote at %s:%d: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); return; } pcmk__tls_add_psk_key(native->tls, &psk_key); gnutls_free(psk_key.data); } native->remote->tls_session = pcmk__new_tls_session(native->tls, sock); if (native->remote->tls_session == NULL) { lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, -EPROTO); return; } /* If the TLS handshake immediately succeeds or fails, we can handle that * now without having to deal with mainloops and retries. Otherwise, add a * trigger to keep trying until we get a result (or it times out). */ rc = pcmk__tls_client_try_handshake(native->remote, &tls_rc); if (rc == EAGAIN) { struct handshake_data_s *hs = NULL; if (native->handshake_trigger != NULL) { return; } hs = pcmk__assert_alloc(1, sizeof(struct handshake_data_s)); hs->lrmd = lrmd; hs->start_time = time(NULL); hs->timeout_sec = TLS_HANDSHAKE_TIMEOUT; native->handshake_trigger = mainloop_add_trigger(G_PRIORITY_LOW, try_handshake_cb, hs); mainloop_set_trigger(native->handshake_trigger); } else if (rc == pcmk_rc_ok) { tls_handshake_succeeded(lrmd); } else { tls_handshake_failed(lrmd, tls_rc, rc); } } static int lrmd_tls_connect_async(lrmd_t * lrmd, int timeout /*ms */ ) { int rc = pcmk_rc_ok; int timer_id = 0; lrmd_private_t *native = lrmd->lrmd_private; native->sock = -1; rc = pcmk__connect_remote(native->server, native->port, timeout, &timer_id, &(native->sock), lrmd, lrmd_tcp_connect_cb); if (rc != pcmk_rc_ok) { crm_warn("Pacemaker Remote connection to %s:%d failed: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); return rc; } native->async_timer = timer_id; return rc; } static int lrmd_tls_connect(lrmd_t * lrmd, int *fd) { int rc = pcmk_rc_ok; bool use_cert = pcmk__x509_enabled(); lrmd_private_t *native = lrmd->lrmd_private; native->sock = -1; rc = pcmk__connect_remote(native->server, native->port, 0, NULL, &(native->sock), NULL, NULL); if (rc != pcmk_rc_ok) { crm_warn("Pacemaker Remote connection to %s:%d failed: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); lrmd_tls_connection_destroy(lrmd); return ENOTCONN; } if (native->tls == NULL) { rc = pcmk__init_tls(&native->tls, false, use_cert ? 
GNUTLS_CRD_CERTIFICATE : GNUTLS_CRD_PSK); if (rc != pcmk_rc_ok) { lrmd_tls_connection_destroy(lrmd); return rc; } } if (!use_cert) { gnutls_datum_t psk_key = { NULL, 0 }; rc = lrmd__init_remote_key(&psk_key); if (rc != pcmk_rc_ok) { lrmd_tls_connection_destroy(lrmd); return rc; } pcmk__tls_add_psk_key(native->tls, &psk_key); gnutls_free(psk_key.data); } native->remote->tls_session = pcmk__new_tls_session(native->tls, native->sock); if (native->remote->tls_session == NULL) { lrmd_tls_connection_destroy(lrmd); return EPROTO; } if (tls_client_handshake(lrmd) != pcmk_rc_ok) { return EKEYREJECTED; } crm_info("Client TLS connection established with Pacemaker Remote server %s:%d", native->server, native->port); if (fd) { *fd = native->sock; } else { rc = add_tls_to_mainloop(lrmd, false); } return rc; } static int lrmd_api_connect(lrmd_t * lrmd, const char *name, int *fd) { int rc = -ENOTCONN; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: rc = lrmd_ipc_connect(lrmd, fd); break; case pcmk__client_tls: rc = lrmd_tls_connect(lrmd, fd); rc = pcmk_rc2legacy(rc); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } if (rc == pcmk_ok) { rc = lrmd_handshake(lrmd, name); rc = pcmk_rc2legacy(rc); } return rc; } static int lrmd_api_connect_async(lrmd_t * lrmd, const char *name, int timeout) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; CRM_CHECK(native && native->callback, return -EINVAL); switch (native->type) { case pcmk__client_ipc: /* fake async connection with ipc. it should be fast * enough that we gain very little from async */ rc = lrmd_api_connect(lrmd, name, NULL); if (!rc) { report_async_connection_result(lrmd, rc); } break; case pcmk__client_tls: rc = lrmd_tls_connect_async(lrmd, timeout); rc = pcmk_rc2legacy(rc); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } return rc; } static void lrmd_ipc_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } } static void lrmd_tls_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote->tls_session) { gnutls_bye(native->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(native->remote->tls_session); native->remote->tls_session = NULL; } if (native->async_timer) { g_source_remove(native->async_timer); native->async_timer = 0; } if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; } else if (native->sock >= 0) { close(native->sock); native->sock = -1; } if (native->pending_notify) { g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; } } static int lrmd_api_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; int rc = pcmk_ok; switch (native->type) { case pcmk__client_ipc: crm_debug("Disconnecting from local executor"); lrmd_ipc_disconnect(lrmd); break; case pcmk__client_tls: crm_debug("Disconnecting from remote executor on %s", native->remote_nodename); lrmd_tls_disconnect(lrmd); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } 
free(native->token); native->token = NULL; free(native->peer_version); native->peer_version = NULL; return rc; } static int lrmd_api_register_rsc(lrmd_t * lrmd, const char *rsc_id, const char *class, const char *provider, const char *type, enum lrmd_call_options options) { int rc = pcmk_ok; xmlNode *data = NULL; if (!class || !type || !rsc_id) { return -EINVAL; } if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) && (provider == NULL)) { return -EINVAL; } data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id); crm_xml_add(data, PCMK__XA_LRMD_CLASS, class); crm_xml_add(data, PCMK__XA_LRMD_PROVIDER, provider); crm_xml_add(data, PCMK__XA_LRMD_TYPE, type); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_REG, data, NULL, 0, options, true); pcmk__xml_free(data); return rc; } static int lrmd_api_unregister_rsc(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_UNREG, data, NULL, 0, options, true); pcmk__xml_free(data); return rc; } lrmd_rsc_info_t * lrmd_new_rsc_info(const char *rsc_id, const char *standard, const char *provider, const char *type) { lrmd_rsc_info_t *rsc_info = pcmk__assert_alloc(1, sizeof(lrmd_rsc_info_t)); rsc_info->id = pcmk__str_copy(rsc_id); rsc_info->standard = pcmk__str_copy(standard); rsc_info->provider = pcmk__str_copy(provider); rsc_info->type = pcmk__str_copy(type); return rsc_info; } lrmd_rsc_info_t * lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info) { return lrmd_new_rsc_info(rsc_info->id, rsc_info->standard, rsc_info->provider, rsc_info->type); } void lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info) { if (!rsc_info) { return; } free(rsc_info->id); free(rsc_info->type); free(rsc_info->standard); free(rsc_info->provider); free(rsc_info); } static lrmd_rsc_info_t * lrmd_api_get_rsc_info(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options) { lrmd_rsc_info_t *rsc_info = NULL; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); xmlNode *output = NULL; const char *class = NULL; const char *provider = NULL; const char *type = NULL; crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id); lrmd_send_command(lrmd, LRMD_OP_RSC_INFO, data, &output, 0, options, true); pcmk__xml_free(data); if (!output) { return NULL; } class = pcmk__xe_get(output, PCMK__XA_LRMD_CLASS); provider = pcmk__xe_get(output, PCMK__XA_LRMD_PROVIDER); type = pcmk__xe_get(output, PCMK__XA_LRMD_TYPE); if (!class || !type) { pcmk__xml_free(output); return NULL; } else if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) && !provider) { pcmk__xml_free(output); return NULL; } rsc_info = lrmd_new_rsc_info(rsc_id, class, provider, type); pcmk__xml_free(output); return rsc_info; } void lrmd_free_op_info(lrmd_op_info_t *op_info) { if (op_info) { free(op_info->rsc_id); free(op_info->action); free(op_info->interval_ms_s); free(op_info->timeout_ms_s); free(op_info); } } static int lrmd_api_get_recurring_ops(lrmd_t *lrmd, const char *rsc_id, int timeout_ms, enum lrmd_call_options options, GList **output) { xmlNode *data = NULL; xmlNode *output_xml = NULL; int rc = pcmk_ok; if (output == NULL) { return -EINVAL; } *output = NULL; // Send request if (rsc_id) { data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); crm_xml_add(data, 
PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id); } rc = lrmd_send_command(lrmd, LRMD_OP_GET_RECURRING, data, &output_xml, timeout_ms, options, true); if (data) { pcmk__xml_free(data); } // Process reply if ((rc != pcmk_ok) || (output_xml == NULL)) { return rc; } for (const xmlNode *rsc_xml = pcmk__xe_first_child(output_xml, PCMK__XE_LRMD_RSC, NULL, NULL); (rsc_xml != NULL) && (rc == pcmk_ok); rsc_xml = pcmk__xe_next(rsc_xml, PCMK__XE_LRMD_RSC)) { rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); if (rsc_id == NULL) { crm_err("Could not parse recurring operation information from executor"); continue; } for (const xmlNode *op_xml = pcmk__xe_first_child(rsc_xml, PCMK__XE_LRMD_RSC_OP, NULL, NULL); op_xml != NULL; op_xml = pcmk__xe_next(op_xml, PCMK__XE_LRMD_RSC_OP)) { lrmd_op_info_t *op_info = calloc(1, sizeof(lrmd_op_info_t)); if (op_info == NULL) { rc = -ENOMEM; break; } op_info->rsc_id = strdup(rsc_id); - op_info->action = crm_element_value_copy(op_xml, - PCMK__XA_LRMD_RSC_ACTION); + op_info->action = pcmk__xe_get_copy(op_xml, + PCMK__XA_LRMD_RSC_ACTION); op_info->interval_ms_s = - crm_element_value_copy(op_xml, PCMK__XA_LRMD_RSC_INTERVAL); - op_info->timeout_ms_s = - crm_element_value_copy(op_xml, PCMK__XA_LRMD_TIMEOUT); + pcmk__xe_get_copy(op_xml, PCMK__XA_LRMD_RSC_INTERVAL); + op_info->timeout_ms_s = pcmk__xe_get_copy(op_xml, + PCMK__XA_LRMD_TIMEOUT); *output = g_list_prepend(*output, op_info); } } pcmk__xml_free(output_xml); return rc; } static void lrmd_api_set_callback(lrmd_t * lrmd, lrmd_event_callback callback) { lrmd_private_t *native = lrmd->lrmd_private; native->callback = callback; } void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)) { lrmd_private_t *native = lrmd->lrmd_private; native->proxy_callback = callback; native->proxy_callback_userdata = userdata; } void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg) { lrmd_private_t *native = lrmd->lrmd_private; if (native->proxy_callback) { crm_log_xml_trace(msg, "PROXY_INBOUND"); native->proxy_callback(lrmd, native->proxy_callback_userdata, msg); } } int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg) { if (lrmd == NULL) { return -ENOTCONN; } crm_xml_add(msg, PCMK__XA_LRMD_OP, CRM_OP_IPC_FWD); crm_log_xml_trace(msg, "PROXY_OUTBOUND"); return lrmd_send_xml_no_reply(lrmd, msg); } static int stonith_get_metadata(const char *provider, const char *type, char **output) { int rc = pcmk_ok; stonith_t *stonith_api = stonith_api_new(); if (stonith_api == NULL) { crm_err("Could not get fence agent meta-data: API memory allocation failed"); return -ENOMEM; } rc = stonith_api->cmds->metadata(stonith_api, st_opt_sync_call, type, provider, output, 0); if ((rc == pcmk_ok) && (*output == NULL)) { rc = -EIO; } stonith_api->cmds->free(stonith_api); return rc; } static int lrmd_api_get_metadata(lrmd_t *lrmd, const char *standard, const char *provider, const char *type, char **output, enum lrmd_call_options options) { return lrmd->cmds->get_metadata_params(lrmd, standard, provider, type, output, options, NULL); } static int lrmd_api_get_metadata_params(lrmd_t *lrmd, const char *standard, const char *provider, const char *type, char **output, enum lrmd_call_options options, lrmd_key_value_t *params) { svc_action_t *action = NULL; GHashTable *params_table = NULL; if (!standard || !type) { lrmd_key_value_freeall(params); return -EINVAL; } if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { 
lrmd_key_value_freeall(params); return stonith_get_metadata(provider, type, output); } params_table = pcmk__strkey_table(free, free); for (const lrmd_key_value_t *param = params; param; param = param->next) { pcmk__insert_dup(params_table, param->key, param->value); } action = services__create_resource_action(type, standard, provider, type, PCMK_ACTION_META_DATA, 0, PCMK_DEFAULT_ACTION_TIMEOUT_MS, params_table, 0); lrmd_key_value_freeall(params); if (action == NULL) { return -ENOMEM; } if (action->rc != PCMK_OCF_UNKNOWN) { services_action_free(action); return -EINVAL; } if (!services_action_sync(action)) { crm_err("Failed to retrieve meta-data for %s:%s:%s", standard, provider, type); services_action_free(action); return -EIO; } if (!action->stdout_data) { crm_err("Failed to receive meta-data for %s:%s:%s", standard, provider, type); services_action_free(action); return -EIO; } *output = strdup(action->stdout_data); services_action_free(action); return pcmk_ok; } static int lrmd_api_exec(lrmd_t *lrmd, const char *rsc_id, const char *action, const char *userdata, guint interval_ms, int timeout, /* ms */ int start_delay, /* ms */ enum lrmd_call_options options, lrmd_key_value_t * params) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES); lrmd_key_value_t *tmp = NULL; crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id); crm_xml_add(data, PCMK__XA_LRMD_RSC_ACTION, action); crm_xml_add(data, PCMK__XA_LRMD_RSC_USERDATA_STR, userdata); crm_xml_add_ms(data, PCMK__XA_LRMD_RSC_INTERVAL, interval_ms); crm_xml_add_int(data, PCMK__XA_LRMD_TIMEOUT, timeout); crm_xml_add_int(data, PCMK__XA_LRMD_RSC_START_DELAY, start_delay); for (tmp = params; tmp; tmp = tmp->next) { hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args); } rc = lrmd_send_command(lrmd, LRMD_OP_RSC_EXEC, data, NULL, timeout, options, true); pcmk__xml_free(data); lrmd_key_value_freeall(params); return rc; } /* timeout is in ms */ static int lrmd_api_exec_alert(lrmd_t *lrmd, const char *alert_id, const char *alert_path, int timeout, lrmd_key_value_t *params) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_ALERT); xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES); lrmd_key_value_t *tmp = NULL; crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_LRMD_ALERT_ID, alert_id); crm_xml_add(data, PCMK__XA_LRMD_ALERT_PATH, alert_path); crm_xml_add_int(data, PCMK__XA_LRMD_TIMEOUT, timeout); for (tmp = params; tmp; tmp = tmp->next) { hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args); } rc = lrmd_send_command(lrmd, LRMD_OP_ALERT_EXEC, data, NULL, timeout, lrmd_opt_notify_orig_only, true); pcmk__xml_free(data); lrmd_key_value_freeall(params); return rc; } static int lrmd_api_cancel(lrmd_t *lrmd, const char *rsc_id, const char *action, guint interval_ms) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__); crm_xml_add(data, PCMK__XA_LRMD_RSC_ACTION, action); crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id); crm_xml_add_ms(data, PCMK__XA_LRMD_RSC_INTERVAL, interval_ms); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_CANCEL, data, NULL, 0, 0, true); pcmk__xml_free(data); return rc; } static int list_stonith_agents(lrmd_list_t ** resources) { int rc = 0; stonith_t *stonith_api = stonith_api_new(); stonith_key_value_t *stonith_resources = NULL; stonith_key_value_t *dIter = 
NULL; if (stonith_api == NULL) { crm_err("Could not list fence agents: API memory allocation failed"); return -ENOMEM; } stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL, &stonith_resources, 0); stonith_api->cmds->free(stonith_api); for (dIter = stonith_resources; dIter; dIter = dIter->next) { rc++; if (resources) { *resources = lrmd_list_add(*resources, dIter->value); } } stonith_key_value_freeall(stonith_resources, 1, 0); return rc; } static int lrmd_api_list_agents(lrmd_t * lrmd, lrmd_list_t ** resources, const char *class, const char *provider) { int rc = 0; int stonith_count = 0; // Initially, whether to include stonith devices if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { stonith_count = 1; } else { GList *gIter = NULL; GList *agents = resources_list_agents(class, provider); for (gIter = agents; gIter != NULL; gIter = gIter->next) { *resources = lrmd_list_add(*resources, (const char *)gIter->data); rc++; } g_list_free_full(agents, free); if (!class) { stonith_count = 1; } } if (stonith_count) { // Now, if stonith devices are included, how many there are stonith_count = list_stonith_agents(resources); if (stonith_count > 0) { rc += stonith_count; } } if (rc == 0) { crm_notice("No agents found for class %s", class); rc = -EPROTONOSUPPORT; } return rc; } static bool does_provider_have_agent(const char *agent, const char *provider, const char *class) { bool found = false; GList *agents = NULL; GList *gIter2 = NULL; agents = resources_list_agents(class, provider); for (gIter2 = agents; gIter2 != NULL; gIter2 = gIter2->next) { if (pcmk__str_eq(agent, gIter2->data, pcmk__str_casei)) { found = true; } } g_list_free_full(agents, free); return found; } static int lrmd_api_list_ocf_providers(lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers) { int rc = pcmk_ok; char *provider = NULL; GList *ocf_providers = NULL; GList *gIter = NULL; ocf_providers = resources_list_providers(PCMK_RESOURCE_CLASS_OCF); for (gIter = ocf_providers; gIter != NULL; gIter = gIter->next) { provider = gIter->data; if (!agent || does_provider_have_agent(agent, provider, PCMK_RESOURCE_CLASS_OCF)) { *providers = lrmd_list_add(*providers, (const char *)gIter->data); rc++; } } g_list_free_full(ocf_providers, free); return rc; } static int lrmd_api_list_standards(lrmd_t * lrmd, lrmd_list_t ** supported) { int rc = 0; GList *standards = NULL; GList *gIter = NULL; standards = resources_list_standards(); for (gIter = standards; gIter != NULL; gIter = gIter->next) { *supported = lrmd_list_add(*supported, (const char *)gIter->data); rc++; } if (list_stonith_agents(NULL) > 0) { *supported = lrmd_list_add(*supported, PCMK_RESOURCE_CLASS_STONITH); rc++; } g_list_free_full(standards, free); return rc; } /*! * \internal * \brief Create an executor API object * * \param[out] api Will be set to newly created API object (it is the * caller's responsibility to free this value with * lrmd_api_delete() if this function succeeds) * \param[in] nodename If the object will be used for a remote connection, * the node name to use in cluster for remote executor * \param[in] server If the object will be used for a remote connection, * the resolvable host name to connect to * \param[in] port If the object will be used for a remote connection, * port number on \p server to connect to * * \return Standard Pacemaker return code * \note If the caller leaves one of \p nodename or \p server NULL, the other's * value will be used for both. 
If the caller leaves both NULL, an API * object will be created for a local executor connection. */ int lrmd__new(lrmd_t **api, const char *nodename, const char *server, int port) { lrmd_private_t *pvt = NULL; if (api == NULL) { return EINVAL; } *api = NULL; // Allocate all memory needed *api = calloc(1, sizeof(lrmd_t)); if (*api == NULL) { return ENOMEM; } pvt = calloc(1, sizeof(lrmd_private_t)); if (pvt == NULL) { lrmd_api_delete(*api); *api = NULL; return ENOMEM; } (*api)->lrmd_private = pvt; // @TODO Do we need to do this for local connections? pvt->remote = calloc(1, sizeof(pcmk__remote_t)); (*api)->cmds = calloc(1, sizeof(lrmd_api_operations_t)); if ((pvt->remote == NULL) || ((*api)->cmds == NULL)) { lrmd_api_delete(*api); *api = NULL; return ENOMEM; } // Set methods (*api)->cmds->connect = lrmd_api_connect; (*api)->cmds->connect_async = lrmd_api_connect_async; (*api)->cmds->is_connected = lrmd_api_is_connected; (*api)->cmds->poke_connection = lrmd_api_poke_connection; (*api)->cmds->disconnect = lrmd_api_disconnect; (*api)->cmds->register_rsc = lrmd_api_register_rsc; (*api)->cmds->unregister_rsc = lrmd_api_unregister_rsc; (*api)->cmds->get_rsc_info = lrmd_api_get_rsc_info; (*api)->cmds->get_recurring_ops = lrmd_api_get_recurring_ops; (*api)->cmds->set_callback = lrmd_api_set_callback; (*api)->cmds->get_metadata = lrmd_api_get_metadata; (*api)->cmds->exec = lrmd_api_exec; (*api)->cmds->cancel = lrmd_api_cancel; (*api)->cmds->list_agents = lrmd_api_list_agents; (*api)->cmds->list_ocf_providers = lrmd_api_list_ocf_providers; (*api)->cmds->list_standards = lrmd_api_list_standards; (*api)->cmds->exec_alert = lrmd_api_exec_alert; (*api)->cmds->get_metadata_params = lrmd_api_get_metadata_params; if ((nodename == NULL) && (server == NULL)) { pvt->type = pcmk__client_ipc; } else { if (nodename == NULL) { nodename = server; } else if (server == NULL) { server = nodename; } pvt->type = pcmk__client_tls; pvt->remote_nodename = strdup(nodename); pvt->server = strdup(server); if ((pvt->remote_nodename == NULL) || (pvt->server == NULL)) { lrmd_api_delete(*api); *api = NULL; return ENOMEM; } pvt->port = port; if (pvt->port == 0) { pvt->port = crm_default_remote_port(); } } return pcmk_rc_ok; } lrmd_t * lrmd_api_new(void) { lrmd_t *api = NULL; pcmk__assert(lrmd__new(&api, NULL, NULL, 0) == pcmk_rc_ok); return api; } lrmd_t * lrmd_remote_api_new(const char *nodename, const char *server, int port) { lrmd_t *api = NULL; pcmk__assert(lrmd__new(&api, nodename, server, port) == pcmk_rc_ok); return api; } void lrmd_api_delete(lrmd_t * lrmd) { if (lrmd == NULL) { return; } if (lrmd->cmds != NULL) { // Never NULL, but make static analysis happy if (lrmd->cmds->disconnect != NULL) { // Also never really NULL lrmd->cmds->disconnect(lrmd); // No-op if already disconnected } free(lrmd->cmds); } if (lrmd->lrmd_private != NULL) { lrmd_private_t *native = lrmd->lrmd_private; free(native->server); free(native->remote_nodename); free(native->remote); free(native->token); free(native->peer_version); free(lrmd->lrmd_private); } free(lrmd); } struct metadata_cb { void (*callback)(int pid, const pcmk__action_result_t *result, void *user_data); void *user_data; }; /*! 
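 * Usage sketch (editor's illustration, not part of this change set): creating
 * and releasing an API object for a Pacemaker Remote connection. The node and
 * host names are hypothetical; passing 0 for the port selects the default
 * remote port.
 *
 * \code
 * lrmd_t *remote = lrmd_remote_api_new("node1", "node1.example.com", 0);
 *
 * // ... remote->cmds->connect(remote, "my-client", NULL), exec, etc. ...
 *
 * lrmd_api_delete(remote);   // disconnects first if still connected
 * \endcode
 */

/*!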
* \internal * \brief Process asynchronous metadata completion * * \param[in,out] action Metadata action that completed */ static void metadata_complete(svc_action_t *action) { struct metadata_cb *metadata_cb = (struct metadata_cb *) action->cb_data; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; services__copy_result(action, &result); pcmk__set_result_output(&result, action->stdout_data, action->stderr_data); metadata_cb->callback(0, &result, metadata_cb->user_data); result.action_stdout = NULL; // Prevent free, because action owns it result.action_stderr = NULL; // Prevent free, because action owns it pcmk__reset_result(&result); free(metadata_cb); } /*! * \internal * \brief Retrieve agent metadata asynchronously * * \param[in] rsc Resource agent specification * \param[in] callback Function to call with result (this will always be * called, whether by this function directly or later * via the main loop, and on success the metadata will * be in its result argument's action_stdout) * \param[in,out] user_data User data to pass to callback * * \return Standard Pacemaker return code * \note This function is not a lrmd_api_operations_t method because it does not * need an lrmd_t object and does not go through the executor, but * executes the agent directly. */ int lrmd__metadata_async(const lrmd_rsc_info_t *rsc, void (*callback)(int pid, const pcmk__action_result_t *result, void *user_data), void *user_data) { svc_action_t *action = NULL; struct metadata_cb *metadata_cb = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(callback != NULL, return EINVAL); if ((rsc == NULL) || (rsc->standard == NULL) || (rsc->type == NULL)) { pcmk__set_result(&result, PCMK_OCF_NOT_CONFIGURED, PCMK_EXEC_ERROR_FATAL, "Invalid resource specification"); callback(0, &result, user_data); pcmk__reset_result(&result); return EINVAL; } if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_STONITH) == 0) { return stonith__metadata_async(rsc->type, pcmk__timeout_ms2s(PCMK_DEFAULT_ACTION_TIMEOUT_MS), callback, user_data); } action = services__create_resource_action(pcmk__s(rsc->id, rsc->type), rsc->standard, rsc->provider, rsc->type, PCMK_ACTION_META_DATA, 0, PCMK_DEFAULT_ACTION_TIMEOUT_MS, NULL, 0); if (action == NULL) { pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Out of memory"); callback(0, &result, user_data); pcmk__reset_result(&result); return ENOMEM; } if (action->rc != PCMK_OCF_UNKNOWN) { services__copy_result(action, &result); callback(0, &result, user_data); pcmk__reset_result(&result); services_action_free(action); return EINVAL; } action->cb_data = calloc(1, sizeof(struct metadata_cb)); if (action->cb_data == NULL) { services_action_free(action); pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Out of memory"); callback(0, &result, user_data); pcmk__reset_result(&result); return ENOMEM; } metadata_cb = (struct metadata_cb *) action->cb_data; metadata_cb->callback = callback; metadata_cb->user_data = user_data; if (!services_action_async(action, metadata_complete)) { services_action_free(action); return pcmk_rc_error; // @TODO Derive from action->rc and ->status } // The services library has taken responsibility for action return pcmk_rc_ok; } /*! 
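 * Usage sketch (editor's illustration, not part of this change set): a
 * hypothetical caller of lrmd__metadata_async(). The agent specification and
 * handler name are examples only; on success the metadata is available in the
 * result's action_stdout.
 *
 * \code
 * static void
 * example_metadata_cb(int pid, const pcmk__action_result_t *result,
 *                     void *user_data)
 * {
 *     if (result->action_stdout != NULL) {
 *         crm_trace("Received agent metadata:\n%s", result->action_stdout);
 *     }
 * }
 *
 * // ...
 * lrmd_rsc_info_t *rsc = lrmd_new_rsc_info("example-ip", "ocf",
 *                                          "heartbeat", "IPaddr2");
 *
 * lrmd__metadata_async(rsc, example_metadata_cb, NULL);
 * lrmd_free_rsc_info(rsc);   // the created action keeps its own copies
 * \endcode
 */

/*!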
* \internal * \brief Set the result of an executor event * * \param[in,out] event Executor event to set * \param[in] rc OCF exit status of event * \param[in] op_status Executor status of event * \param[in] exit_reason Human-friendly description of event */ void lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc, int op_status, const char *exit_reason) { if (event == NULL) { return; } event->rc = rc; event->op_status = op_status; // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees pcmk__str_update((char **) &event->exit_reason, exit_reason); } /*! * \internal * \brief Clear an executor event's exit reason, output, and error output * * \param[in,out] event Executor event to reset */ void lrmd__reset_result(lrmd_event_data_t *event) { if (event == NULL) { return; } free((void *) event->exit_reason); event->exit_reason = NULL; free((void *) event->output); event->output = NULL; } /*! * \internal * \brief Get the uptime of a remote resource connection * * When the cluster connects to a remote resource, part of that resource's * handshake includes the uptime of the remote resource's connection. This * uptime is stored in the lrmd_t object. * * \return The connection's uptime, or -1 if unknown */ time_t lrmd__uptime(lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote == NULL) { return -1; } else { return native->remote->uptime; } } const char * lrmd__node_start_state(lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote == NULL) { return NULL; } else { return native->remote->start_state; } } diff --git a/lib/pacemaker/pcmk_graph_consumer.c b/lib/pacemaker/pcmk_graph_consumer.c index b8de60985e..d69dd2ba95 100644 --- a/lib/pacemaker/pcmk_graph_consumer.c +++ b/lib/pacemaker/pcmk_graph_consumer.c @@ -1,865 +1,865 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include /* * Functions for freeing transition graph objects */ /*! * \internal * \brief Free a transition graph action object * * \param[in,out] user_data Action to free */ static void free_graph_action(gpointer user_data) { pcmk__graph_action_t *action = user_data; if (action->timer != 0) { crm_warn("Cancelling timer for graph action %d", action->id); g_source_remove(action->timer); } if (action->params != NULL) { g_hash_table_destroy(action->params); } pcmk__xml_free(action->xml); free(action); } /*! * \internal * \brief Free a transition graph synapse object * * \param[in,out] user_data Synapse to free */ static void free_graph_synapse(gpointer user_data) { pcmk__graph_synapse_t *synapse = user_data; g_list_free_full(synapse->actions, free_graph_action); g_list_free_full(synapse->inputs, free_graph_action); free(synapse); } /*! * \internal * \brief Free a transition graph object * * \param[in,out] graph Transition graph to free */ void pcmk__free_graph(pcmk__graph_t *graph) { if (graph != NULL) { g_list_free_full(graph->synapses, free_graph_synapse); free(graph->source); free(graph->failed_stop_offset); free(graph->failed_start_offset); free(graph); } } /* * Functions for updating graph */ /*! * \internal * \brief Update synapse after completed prerequisite * * A synapse is ready to be executed once all its prerequisite actions (inputs) * complete. 
Given a completed action, check whether it is an input for a given * synapse, and if so, mark the input as confirmed, and mark the synapse as * ready if appropriate. * * \param[in,out] synapse Transition graph synapse to update * \param[in] action_id ID of an action that completed * * \note The only substantial effect here is confirming synapse inputs. * should_fire_synapse() will recalculate pcmk__synapse_ready, so the only * thing that uses the pcmk__synapse_ready from here is * synapse_state_str(). */ static void update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id) { if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) { return; // All inputs have already been confirmed } // Presume ready until proven otherwise pcmk__set_synapse_flags(synapse, pcmk__synapse_ready); for (GList *lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data; if (prereq->id == action_id) { crm_trace("Confirming input %d of synapse %d", action_id, synapse->id); pcmk__set_graph_action_flags(prereq, pcmk__graph_action_confirmed); } else if (!pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed)) { pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready); crm_trace("Synapse %d still not ready after action %d", synapse->id, action_id); } } if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) { crm_trace("Synapse %d is now ready to execute", synapse->id); } } /*! * \internal * \brief Update action and synapse confirmation after action completion * * \param[in,out] synapse Transition graph synapse that action belongs to * \param[in] action_id ID of action that completed */ static void update_synapse_confirmed(pcmk__graph_synapse_t *synapse, int action_id) { bool all_confirmed = true; for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data; if (action->id == action_id) { crm_trace("Confirmed action %d of synapse %d", action_id, synapse->id); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); } else if (all_confirmed && !pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) { all_confirmed = false; crm_trace("Synapse %d still not confirmed after action %d", synapse->id, action_id); } } if (all_confirmed && !pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) { crm_trace("Confirmed synapse %d", synapse->id); pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed); } } /*! * \internal * \brief Update the transition graph with a completed action result * * \param[in,out] graph Transition graph to update * \param[in] action Action that completed */ void pcmk__update_graph(pcmk__graph_t *graph, const pcmk__graph_action_t *action) { for (GList *lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data; if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed|pcmk__synapse_failed)) { continue; // This synapse already completed } else if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) { update_synapse_confirmed(synapse, action->id); } else if (!pcmk_is_set(action->flags, pcmk__graph_action_failed) || (synapse->priority == PCMK_SCORE_INFINITY)) { update_synapse_ready(synapse, action->id); } } } /* * Functions for executing graph */ /* A transition graph consists of various types of actions. The library caller * registers execution functions for each action type, which will be stored * here. */ static pcmk__graph_functions_t *graph_fns = NULL; /*! 
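 * Usage sketch (editor's illustration, not part of this change set): the
 * minimal completion path an execution handler follows once an action
 * finishes, mirroring pseudo_action_dummy() further below.
 *
 * \code
 * // On success:
 * pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
 * pcmk__update_graph(graph, action);
 *
 * // On failure, additionally:
 * pcmk__set_graph_action_flags(action, pcmk__graph_action_failed);
 * graph->abort_priority = PCMK_SCORE_INFINITY;
 * \endcode
 */

/*!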
* \internal * \brief Set transition graph execution functions * * \param[in] Execution functions to use */ void pcmk__set_graph_functions(pcmk__graph_functions_t *fns) { pcmk__assert((fns != NULL) && (fns->rsc != NULL) && (fns->cluster != NULL) && (fns->pseudo != NULL) && (fns->fence != NULL)); crm_debug("Setting custom functions for executing transition graphs"); graph_fns = fns; } /*! * \internal * \brief Check whether a graph synapse is ready to be executed * * \param[in,out] graph Transition graph that synapse is part of * \param[in,out] synapse Synapse to check * * \return true if synapse is ready, false otherwise */ static bool should_fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse) { GList *lpc = NULL; pcmk__set_synapse_flags(synapse, pcmk__synapse_ready); for (lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data; if (!(pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed))) { crm_trace("Input %d for synapse %d not yet confirmed", prereq->id, synapse->id); pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready); break; } else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed)) { crm_trace("Input %d for synapse %d confirmed but failed", prereq->id, synapse->id); pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready); break; } } if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) { crm_trace("Synapse %d is ready to execute", synapse->id); } else { return false; } for (lpc = synapse->actions; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *a = (pcmk__graph_action_t *) lpc->data; if (a->type == pcmk__pseudo_graph_action) { /* None of the below applies to pseudo ops */ } else if (synapse->priority < graph->abort_priority) { crm_trace("Skipping synapse %d: priority %d is less than " "abort priority %d", synapse->id, synapse->priority, graph->abort_priority); graph->skipped++; return false; } else if (graph_fns->allowed && !(graph_fns->allowed(graph, a))) { crm_trace("Deferring synapse %d: not allowed", synapse->id); return false; } } return true; } /*! * \internal * \brief Initiate an action from a transition graph * * \param[in,out] graph Transition graph containing action * \param[in,out] action Action to execute * * \return Standard Pacemaker return code */ static int initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *id = pcmk__xe_id(action->xml); CRM_CHECK(id != NULL, return EINVAL); CRM_CHECK(!pcmk_is_set(action->flags, pcmk__graph_action_executed), return pcmk_rc_already); pcmk__set_graph_action_flags(action, pcmk__graph_action_executed); switch (action->type) { case pcmk__pseudo_graph_action: crm_trace("Executing pseudo-action %d (%s)", action->id, id); return graph_fns->pseudo(graph, action); case pcmk__rsc_graph_action: crm_trace("Executing resource action %d (%s)", action->id, id); return graph_fns->rsc(graph, action); case pcmk__cluster_graph_action: if (pcmk__str_eq(pcmk__xe_get(action->xml, PCMK_XA_OPERATION), PCMK_ACTION_STONITH, pcmk__str_none)) { crm_trace("Executing fencing action %d (%s)", action->id, id); return graph_fns->fence(graph, action); } crm_trace("Executing cluster action %d (%s)", action->id, id); return graph_fns->cluster(graph, action); default: crm_err("Unsupported graph action type <%s " PCMK_XA_ID "='%s'> " "(bug?)", action->xml->name, id); return EINVAL; } } /*! 
* \internal * \brief Execute a graph synapse * * \param[in,out] graph Transition graph with synapse to execute * \param[in,out] synapse Synapse to execute * * \return Standard Pacemaker return value */ static int fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse) { pcmk__set_synapse_flags(synapse, pcmk__synapse_executed); for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) { pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data; int rc = initiate_action(graph, action); if (rc != pcmk_rc_ok) { crm_err("Failed initiating <%s " PCMK_XA_ID "=%d> in synapse %d: " "%s", action->xml->name, action->id, synapse->id, pcmk_rc_str(rc)); pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed |pcmk__graph_action_failed); return pcmk_rc_error; } } return pcmk_rc_ok; } /*! * \internal * \brief Dummy graph method that can be used with simulations * * \param[in,out] graph Transition graph containing action * \param[in,out] action Graph action to be initiated * * \return Standard Pacemaker return code * \note If the PE_fail environment variable is set to the action ID, * then the graph action will be marked as failed. */ static int pseudo_action_dummy(pcmk__graph_t *graph, pcmk__graph_action_t *action) { static int fail = -1; if (fail < 0) { long long fail_ll; if ((pcmk__scan_ll(getenv("PE_fail"), &fail_ll, 0LL) == pcmk_rc_ok) && (fail_ll > 0LL) && (fail_ll <= INT_MAX)) { fail = (int) fail_ll; } else { fail = 0; } } if (action->id == fail) { crm_err("Dummy event handler: pretending action %d failed", action->id); pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); graph->abort_priority = PCMK_SCORE_INFINITY; } else { crm_trace("Dummy event handler: action %d initiated", action->id); } pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); pcmk__update_graph(graph, action); return pcmk_rc_ok; } static pcmk__graph_functions_t default_fns = { pseudo_action_dummy, pseudo_action_dummy, pseudo_action_dummy, pseudo_action_dummy }; /*! 
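 * Typical use (illustrative sketch, mirroring pcmk__simulate_transition() in
 * pcmk_simulate.c): call repeatedly while the graph is still active, that is,
 * while actions are still being initiated:
 *
 *   enum pcmk__graph_status graph_rc;
 *
 *   do {
 *       graph_rc = pcmk__execute_graph(transition);
 *   } while (graph_rc == pcmk__graph_active);
 *
 *   if (graph_rc != pcmk__graph_complete) {
 *       // graph was terminated or is still pending; handle as appropriate
 *   }
 *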
* \internal * \brief Execute all actions in a transition graph * * \param[in,out] graph Transition graph to execute * * \return Status of transition after execution */ enum pcmk__graph_status pcmk__execute_graph(pcmk__graph_t *graph) { GList *lpc = NULL; int log_level = LOG_DEBUG; enum pcmk__graph_status pass_result = pcmk__graph_active; const char *status = "In progress"; if (graph_fns == NULL) { graph_fns = &default_fns; } if (graph == NULL) { return pcmk__graph_complete; } graph->fired = 0; graph->pending = 0; graph->skipped = 0; graph->completed = 0; graph->incomplete = 0; // Count completed and in-flight synapses for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data; if (pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) { graph->completed++; } else if (!pcmk_is_set(synapse->flags, pcmk__synapse_failed) && pcmk_is_set(synapse->flags, pcmk__synapse_executed)) { graph->pending++; } } crm_trace("Executing graph %d (%d synapses already completed, %d pending)", graph->id, graph->completed, graph->pending); // Execute any synapses that are ready for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) { pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data; if ((graph->batch_limit > 0) && (graph->pending >= graph->batch_limit)) { crm_debug("Throttling graph execution: batch limit (%d) reached", graph->batch_limit); break; } else if (pcmk_is_set(synapse->flags, pcmk__synapse_failed)) { graph->skipped++; continue; } else if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed |pcmk__synapse_executed)) { continue; // Already handled } else if (should_fire_synapse(graph, synapse)) { graph->fired++; if (fire_synapse(graph, synapse) != pcmk_rc_ok) { crm_err("Synapse %d failed to fire", synapse->id); log_level = LOG_ERR; graph->abort_priority = PCMK_SCORE_INFINITY; graph->incomplete++; graph->fired--; } if (!(pcmk_is_set(synapse->flags, pcmk__synapse_confirmed))) { graph->pending++; } } else { crm_trace("Synapse %d cannot fire", synapse->id); graph->incomplete++; } } if ((graph->pending == 0) && (graph->fired == 0)) { graph->complete = true; if ((graph->incomplete != 0) && (graph->abort_priority <= 0)) { log_level = LOG_WARNING; pass_result = pcmk__graph_terminated; status = "Terminated"; } else if (graph->skipped != 0) { log_level = LOG_NOTICE; pass_result = pcmk__graph_complete; status = "Stopped"; } else { log_level = LOG_NOTICE; pass_result = pcmk__graph_complete; status = "Complete"; } } else if (graph->fired == 0) { pass_result = pcmk__graph_pending; } do_crm_log(log_level, "Transition %d (Complete=%d, Pending=%d," " Fired=%d, Skipped=%d, Incomplete=%d, Source=%s): %s", graph->id, graph->completed, graph->pending, graph->fired, graph->skipped, graph->incomplete, graph->source, status); return pass_result; } /* * Functions for unpacking transition graph XML into structs */ /*! 
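 * A resource action element looks roughly like the following (illustrative
 * sketch reconstructed from what this function and
 * pcmk__event_from_graph_action() read; attribute spellings are abbreviated):
 *
 *   <rsc_op id="4" operation="start" on_node="node1">
 *     <primitive id="my-rsc" class="ocf" provider="heartbeat" type="Dummy"/>
 *     <attributes CRM_meta_timeout="20000" CRM_meta_interval="0"/>
 *   </rsc_op>
 *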
* \internal * \brief Unpack a transition graph action from XML * * \param[in] parent Synapse that action is part of * \param[in] xml_action Action XML to unparse * * \return Newly allocated action on success, or NULL otherwise */ static pcmk__graph_action_t * unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action) { enum pcmk__graph_action_type action_type; pcmk__graph_action_t *action = NULL; const char *value = pcmk__xe_id(xml_action); if (value == NULL) { crm_err("Ignoring transition graph action without " PCMK_XA_ID " (bug?)"); crm_log_xml_trace(xml_action, "invalid"); return NULL; } if (pcmk__xe_is(xml_action, PCMK__XE_RSC_OP)) { action_type = pcmk__rsc_graph_action; } else if (pcmk__xe_is(xml_action, PCMK__XE_PSEUDO_EVENT)) { action_type = pcmk__pseudo_graph_action; } else if (pcmk__xe_is(xml_action, PCMK__XE_CRM_EVENT)) { action_type = pcmk__cluster_graph_action; } else { crm_err("Ignoring transition graph action of unknown type '%s' (bug?)", xml_action->name); crm_log_xml_trace(xml_action, "invalid"); return NULL; } action = calloc(1, sizeof(pcmk__graph_action_t)); if (action == NULL) { crm_perror(LOG_CRIT, "Cannot unpack transition graph action"); crm_log_xml_trace(xml_action, "lost"); return NULL; } pcmk__scan_min_int(value, &(action->id), -1); action->type = pcmk__rsc_graph_action; action->xml = pcmk__xml_copy(NULL, xml_action); action->synapse = parent; action->type = action_type; action->params = xml2list(action->xml); value = crm_meta_value(action->params, PCMK_META_TIMEOUT); pcmk__scan_min_int(value, &(action->timeout), 0); /* Take PCMK_META_START_DELAY into account for the timeout of the action * timer */ value = crm_meta_value(action->params, PCMK_META_START_DELAY); { int start_delay; pcmk__scan_min_int(value, &start_delay, 0); action->timeout += start_delay; } if (pcmk__guint_from_hash(action->params, CRM_META "_" PCMK_META_INTERVAL, 0, &(action->interval_ms)) != pcmk_rc_ok) { action->interval_ms = 0; } crm_trace("Action %d has timer set to %dms", action->id, action->timeout); return action; } /*! 
* \internal * \brief Unpack transition graph synapse from XML * * \param[in,out] new_graph Transition graph that synapse is part of * \param[in] xml_synapse Synapse XML * * \return Newly allocated synapse on success, or NULL otherwise */ static pcmk__graph_synapse_t * unpack_synapse(pcmk__graph_t *new_graph, const xmlNode *xml_synapse) { const char *value = NULL; xmlNode *action_set = NULL; pcmk__graph_synapse_t *new_synapse = NULL; crm_trace("Unpacking synapse %s", pcmk__xe_id(xml_synapse)); new_synapse = calloc(1, sizeof(pcmk__graph_synapse_t)); if (new_synapse == NULL) { return NULL; } pcmk__scan_min_int(pcmk__xe_id(xml_synapse), &(new_synapse->id), 0); value = pcmk__xe_get(xml_synapse, PCMK__XA_PRIORITY); pcmk__scan_min_int(value, &(new_synapse->priority), 0); CRM_CHECK(new_synapse->id >= 0, free_graph_synapse((gpointer) new_synapse); return NULL); new_graph->num_synapses++; crm_trace("Unpacking synapse %s action sets", pcmk__xe_get(xml_synapse, PCMK_XA_ID)); for (action_set = pcmk__xe_first_child(xml_synapse, PCMK__XE_ACTION_SET, NULL, NULL); action_set != NULL; action_set = pcmk__xe_next(action_set, PCMK__XE_ACTION_SET)) { for (xmlNode *action = pcmk__xe_first_child(action_set, NULL, NULL, NULL); action != NULL; action = pcmk__xe_next(action, NULL)) { pcmk__graph_action_t *new_action = unpack_action(new_synapse, action); if (new_action == NULL) { continue; } crm_trace("Adding action %d to synapse %d", new_action->id, new_synapse->id); new_graph->num_actions++; new_synapse->actions = g_list_append(new_synapse->actions, new_action); } } crm_trace("Unpacking synapse %s inputs", pcmk__xe_id(xml_synapse)); for (xmlNode *inputs = pcmk__xe_first_child(xml_synapse, PCMK__XE_INPUTS, NULL, NULL); inputs != NULL; inputs = pcmk__xe_next(inputs, PCMK__XE_INPUTS)) { for (xmlNode *trigger = pcmk__xe_first_child(inputs, PCMK__XE_TRIGGER, NULL, NULL); trigger != NULL; trigger = pcmk__xe_next(trigger, PCMK__XE_TRIGGER)) { for (xmlNode *input = pcmk__xe_first_child(trigger, NULL, NULL, NULL); input != NULL; input = pcmk__xe_next(input, NULL)) { pcmk__graph_action_t *new_input = unpack_action(new_synapse, input); if (new_input == NULL) { continue; } crm_trace("Adding input %d to synapse %d", new_input->id, new_synapse->id); new_synapse->inputs = g_list_append(new_synapse->inputs, new_input); } } } return new_synapse; } /*! * \internal * \brief Unpack transition graph XML * * \param[in] xml_graph Transition graph XML to unpack * \param[in] reference Where the XML came from (for logging) * * \return Newly allocated transition graph on success, NULL otherwise * \note The caller is responsible for freeing the return value using * pcmk__free_graph(). * \note The XML is expected to be structured like: ... ... 
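 * (roughly, as reconstructed from the unpacking code below; element and
 * attribute values are illustrative):
 *
 *   <transition_graph transition_id="0" cluster-delay="60s" ...>
 *     <synapse id="0" priority="0">
 *       <action_set>
 *         <rsc_op id="4" operation="start" ...> ... </rsc_op>
 *       </action_set>
 *       <inputs>
 *         <trigger>
 *           <rsc_op id="2" ...> ... </rsc_op>
 *         </trigger>
 *       </inputs>
 *     </synapse>
 *   </transition_graph>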
*/ pcmk__graph_t * pcmk__unpack_graph(const xmlNode *xml_graph, const char *reference) { pcmk__graph_t *new_graph = NULL; new_graph = calloc(1, sizeof(pcmk__graph_t)); if (new_graph == NULL) { return NULL; } new_graph->source = strdup(pcmk__s(reference, "unknown")); if (new_graph->source == NULL) { pcmk__free_graph(new_graph); return NULL; } new_graph->completion_action = pcmk__graph_done; // Parse top-level attributes from PCMK__XE_TRANSITION_GRAPH if (xml_graph != NULL) { const char *buf = pcmk__xe_get(xml_graph, "transition_id"); CRM_CHECK(buf != NULL, pcmk__free_graph(new_graph); return NULL); pcmk__scan_min_int(buf, &(new_graph->id), 1); buf = pcmk__xe_get(xml_graph, PCMK_OPT_CLUSTER_DELAY); CRM_CHECK(buf != NULL, pcmk__free_graph(new_graph); return NULL); pcmk_parse_interval_spec(buf, &(new_graph->network_delay)); buf = pcmk__xe_get(xml_graph, PCMK_OPT_STONITH_TIMEOUT); if (buf == NULL) { new_graph->stonith_timeout = new_graph->network_delay; } else { pcmk_parse_interval_spec(buf, &(new_graph->stonith_timeout)); } // Use 0 (dynamic limit) as default/invalid, -1 (no limit) as minimum buf = pcmk__xe_get(xml_graph, PCMK_OPT_BATCH_LIMIT); if ((buf == NULL) || (pcmk__scan_min_int(buf, &(new_graph->batch_limit), -1) != pcmk_rc_ok)) { new_graph->batch_limit = 0; } buf = pcmk__xe_get(xml_graph, PCMK_OPT_MIGRATION_LIMIT); pcmk__scan_min_int(buf, &(new_graph->migration_limit), -1); new_graph->failed_stop_offset = - crm_element_value_copy(xml_graph, PCMK__XA_FAILED_STOP_OFFSET); + pcmk__xe_get_copy(xml_graph, PCMK__XA_FAILED_STOP_OFFSET); new_graph->failed_start_offset = - crm_element_value_copy(xml_graph, PCMK__XA_FAILED_START_OFFSET); + pcmk__xe_get_copy(xml_graph, PCMK__XA_FAILED_START_OFFSET); pcmk__xe_get_time(xml_graph, "recheck-by", &(new_graph->recheck_by)); } // Unpack each child element for (const xmlNode *synapse_xml = pcmk__xe_first_child(xml_graph, PCMK__XE_SYNAPSE, NULL, NULL); synapse_xml != NULL; synapse_xml = pcmk__xe_next(synapse_xml, PCMK__XE_SYNAPSE)) { pcmk__graph_synapse_t *new_synapse = unpack_synapse(new_graph, synapse_xml); if (new_synapse != NULL) { new_graph->synapses = g_list_append(new_graph->synapses, new_synapse); } } crm_debug("Unpacked transition %d from %s: %d actions in %d synapses", new_graph->id, new_graph->source, new_graph->num_actions, new_graph->num_synapses); return new_graph; } /* * Other transition graph utilities */ /*! 
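 * Example (illustrative sketch, following the call in
 * simulate_resource_action() in pcmk_simulate.c): synthesize a successful
 * result for a resource action and release it when finished:
 *
 *   lrmd_event_data_t *op = pcmk__event_from_graph_action(cib_resource,
 *                                                         action,
 *                                                         PCMK_EXEC_DONE,
 *                                                         PCMK_OCF_OK,
 *                                                         "Simulated result");
 *   if (op != NULL) {
 *       // ... inspect op->op_type, op->rc, op->params, etc. ...
 *       lrmd_free_event(op);
 *   }
 *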
* \internal * \brief Synthesize an executor event from a graph action * * \param[in] resource If not NULL, use greater call ID than in this XML * \param[in] action Graph action * \param[in] status What to use as event execution status * \param[in] rc What to use as event exit status * \param[in] exit_reason What to use as event exit reason * * \return Newly allocated executor event on success, or NULL otherwise */ lrmd_event_data_t * pcmk__event_from_graph_action(const xmlNode *resource, const pcmk__graph_action_t *action, int status, int rc, const char *exit_reason) { lrmd_event_data_t *op = NULL; GHashTableIter iter; const char *name = NULL; const char *value = NULL; xmlNode *action_resource = NULL; CRM_CHECK(action != NULL, return NULL); CRM_CHECK(action->type == pcmk__rsc_graph_action, return NULL); action_resource = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL, NULL); CRM_CHECK(action_resource != NULL, crm_log_xml_warn(action->xml, "invalid"); return NULL); op = lrmd_new_event(pcmk__xe_id(action_resource), pcmk__xe_get(action->xml, PCMK_XA_OPERATION), action->interval_ms); lrmd__set_result(op, rc, status, exit_reason); op->t_run = time(NULL); op->t_rcchange = op->t_run; op->params = pcmk__strkey_table(free, free); g_hash_table_iter_init(&iter, action->params); while (g_hash_table_iter_next(&iter, (void **)&name, (void **)&value)) { pcmk__insert_dup(op->params, name, value); } for (xmlNode *xop = pcmk__xe_first_child(resource, NULL, NULL, NULL); xop != NULL; xop = pcmk__xe_next(xop, NULL)) { int tmp = 0; pcmk__xe_get_int(xop, PCMK__XA_CALL_ID, &tmp); crm_debug("Got call_id=%d for %s", tmp, pcmk__xe_id(resource)); if (tmp > op->call_id) { op->call_id = tmp; } } op->call_id++; return op; } diff --git a/lib/pacemaker/pcmk_simulate.c b/lib/pacemaker/pcmk_simulate.c index 1784c15c68..9f9b3863df 100644 --- a/lib/pacemaker/pcmk_simulate.c +++ b/lib/pacemaker/pcmk_simulate.c @@ -1,1068 +1,1068 @@ /* * Copyright 2021-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include // uint32_t, uint64_t #include #include #include #include "libpacemaker_private.h" static const char *profiling_dir = NULL; static pcmk__output_t *out = NULL; static cib_t *fake_cib = NULL; static GList *fake_resource_list = NULL; static const GList *fake_op_fail_list = NULL; static void set_effective_date(pcmk_scheduler_t *scheduler, bool print_original, const char *use_date); /*! * \internal * \brief Create an action name for use in a dot graph * * \param[in] action Action to create name for * \param[in] verbose If true, add action ID to name * * \return Newly allocated string with action name * \note It is the caller's responsibility to free the result. 
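 * For example (illustrative), starting resource "db" on node "node1" would
 * typically yield "db_start_0 node1", or "db_start_0 node1 (4)" when
 * \p verbose is true (4 being the action ID).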
*/ static char * create_action_name(const pcmk_action_t *action, bool verbose) { char *action_name = NULL; const char *prefix = ""; const char *action_host = NULL; const char *history_id = NULL; const char *task = action->task; if (action->node != NULL) { action_host = action->node->priv->name; } else if (!pcmk_is_set(action->flags, pcmk__action_pseudo)) { action_host = ""; } if (pcmk__str_eq(action->task, PCMK_ACTION_CANCEL, pcmk__str_none)) { prefix = "Cancel "; task = action->cancel_task; } if (action->rsc != NULL) { history_id = action->rsc->priv->history_id; } if (history_id != NULL) { char *key = NULL; guint interval_ms = 0; if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0, &interval_ms) != pcmk_rc_ok) { interval_ms = 0; } if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY, PCMK_ACTION_NOTIFIED, NULL)) { const char *n_type = g_hash_table_lookup(action->meta, "notify_key_type"); const char *n_task = g_hash_table_lookup(action->meta, "notify_key_operation"); pcmk__assert((n_type != NULL) && (n_task != NULL)); key = pcmk__notify_key(history_id, n_type, n_task); } else { key = pcmk__op_key(history_id, task, interval_ms); } if (action_host != NULL) { action_name = crm_strdup_printf("%s%s %s", prefix, key, action_host); } else { action_name = crm_strdup_printf("%s%s", prefix, key); } free(key); } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) { const char *op = g_hash_table_lookup(action->meta, PCMK__META_STONITH_ACTION); action_name = crm_strdup_printf("%s%s '%s' %s", prefix, action->task, op, action_host); } else if (action->rsc && action_host) { action_name = crm_strdup_printf("%s%s %s", prefix, action->uuid, action_host); } else if (action_host) { action_name = crm_strdup_printf("%s%s %s", prefix, action->task, action_host); } else { action_name = crm_strdup_printf("%s", action->uuid); } if (verbose) { char *with_id = crm_strdup_printf("%s (%d)", action_name, action->id); free(action_name); action_name = with_id; } return action_name; } /*! * \internal * \brief Display the status of a cluster * * \param[in,out] scheduler Scheduler data * \param[in] show_opts How to modify display (as pcmk_show_opt_e flags) * \param[in] section_opts Sections to display (as pcmk_section_e flags) * \param[in] title What to use as list title * \param[in] print_spacer Whether to display a spacer first */ static void print_cluster_status(pcmk_scheduler_t *scheduler, uint32_t show_opts, uint32_t section_opts, const char *title, bool print_spacer) { pcmk__output_t *out = scheduler->priv->out; GList *all = NULL; crm_exit_t stonith_rc = 0; enum pcmk_pacemakerd_state state = pcmk_pacemakerd_state_invalid; section_opts |= pcmk_section_nodes | pcmk_section_resources; show_opts |= pcmk_show_inactive_rscs | pcmk_show_failed_detail; all = g_list_prepend(all, (gpointer) "*"); PCMK__OUTPUT_SPACER_IF(out, print_spacer); out->begin_list(out, NULL, NULL, "%s", title); out->message(out, "cluster-status", scheduler, state, stonith_rc, NULL, pcmk__fence_history_none, section_opts, show_opts, NULL, all, all); out->end_list(out); g_list_free(all); } /*! 
* \internal * \brief Display a summary of all actions scheduled in a transition * * \param[in,out] scheduler Scheduler data (fully scheduled) * \param[in] print_spacer Whether to display a spacer first */ static void print_transition_summary(pcmk_scheduler_t *scheduler, bool print_spacer) { pcmk__output_t *out = scheduler->priv->out; PCMK__OUTPUT_SPACER_IF(out, print_spacer); out->begin_list(out, NULL, NULL, "Transition Summary"); pcmk__output_actions(scheduler); out->end_list(out); } /*! * \internal * \brief Reset scheduler, set some members, and unpack status * * \param[in,out] scheduler Scheduler data * \param[in] input What to set as cluster input * \param[in] out What to set as cluster output object * \param[in] use_date What to set as cluster's current timestamp * \param[in] flags Group of enum pcmk__scheduler_flags to set */ static void reset(pcmk_scheduler_t *scheduler, xmlNodePtr input, pcmk__output_t *out, const char *use_date, unsigned int flags) { pcmk_reset_scheduler(scheduler); scheduler->input = input; scheduler->priv->out = out; set_effective_date(scheduler, true, use_date); if (pcmk_is_set(flags, pcmk_sim_sanitized)) { pcmk__set_scheduler_flags(scheduler, pcmk__sched_sanitized); } if (pcmk_is_set(flags, pcmk_sim_show_scores)) { pcmk__set_scheduler_flags(scheduler, pcmk__sched_output_scores); } if (pcmk_is_set(flags, pcmk_sim_show_utilization)) { pcmk__set_scheduler_flags(scheduler, pcmk__sched_show_utilization); } cluster_status(scheduler); } /*! * \brief Write out a file in dot(1) format describing the actions that will * be taken by the scheduler in response to an input CIB file. * * \param[in,out] scheduler Scheduler data * \param[in] dot_file The filename to write * \param[in] all_actions Write all actions, even those that are optional * or are on unmanaged resources * \param[in] verbose Add extra information, such as action IDs, to the * output * * \return Standard Pacemaker return code */ static int write_sim_dotfile(pcmk_scheduler_t *scheduler, const char *dot_file, bool all_actions, bool verbose) { GList *iter = NULL; FILE *dot_strm = fopen(dot_file, "w"); if (dot_strm == NULL) { return errno; } fprintf(dot_strm, " digraph \"g\" {\n"); for (iter = scheduler->priv->actions; iter != NULL; iter = iter->next) { pcmk_action_t *action = (pcmk_action_t *) iter->data; const char *style = "dashed"; const char *font = "black"; const char *color = "black"; char *action_name = create_action_name(action, verbose); if (pcmk_is_set(action->flags, pcmk__action_pseudo)) { font = "orange"; } if (pcmk_is_set(action->flags, pcmk__action_added_to_graph)) { style = PCMK__VALUE_BOLD; color = "green"; } else if ((action->rsc != NULL) && !pcmk_is_set(action->rsc->flags, pcmk__rsc_managed)) { color = "red"; font = "purple"; if (!all_actions) { goto do_not_write; } } else if (pcmk_is_set(action->flags, pcmk__action_optional)) { color = "blue"; if (!all_actions) { goto do_not_write; } } else { color = "red"; CRM_LOG_ASSERT(!pcmk_is_set(action->flags, pcmk__action_runnable)); } pcmk__set_action_flags(action, pcmk__action_added_to_graph); fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n", action_name, style, color, font); do_not_write: free(action_name); } for (iter = scheduler->priv->actions; iter != NULL; iter = iter->next) { pcmk_action_t *action = (pcmk_action_t *) iter->data; for (GList *before_iter = action->actions_before; before_iter != NULL; before_iter = before_iter->next) { pcmk__related_action_t *before = before_iter->data; char *before_name = NULL; char 
*after_name = NULL; const char *style = "dashed"; bool optional = true; if (before->graphed) { optional = false; style = PCMK__VALUE_BOLD; } else if (before->flags == pcmk__ar_none) { continue; } else if (pcmk_is_set(before->action->flags, pcmk__action_added_to_graph) && pcmk_is_set(action->flags, pcmk__action_added_to_graph) && before->flags != pcmk__ar_if_on_same_node_or_target) { optional = false; } if (all_actions || !optional) { before_name = create_action_name(before->action, verbose); after_name = create_action_name(action, verbose); fprintf(dot_strm, "\"%s\" -> \"%s\" [ style = %s]\n", before_name, after_name, style); free(before_name); free(after_name); } } } fprintf(dot_strm, "}\n"); fflush(dot_strm); fclose(dot_strm); return pcmk_rc_ok; } /*! * \internal * \brief \c scandir() filter for scheduler input CIB files to profile * * \param[in] entry Directory entry * * \retval 1 if the filename ends with ".xml", does not begin with ".", and * refers to a regular file * \retval 0 otherwise */ static int profile_filter(const struct dirent *entry) { const char *filename = entry->d_name; char *buf = NULL; struct stat sb; int rc = 0; if (pcmk__str_any_of(filename, ".", "..", NULL)) { // Skip current (".") and parent ("..") directory links goto done; } if (filename[0] == '.') { crm_trace("Not profiling hidden file '%s'", filename); goto done; } if (!pcmk__ends_with_ext(filename, ".xml")) { crm_trace("Not profiling file '%s' without '.xml' extension", filename); goto done; } buf = crm_strdup_printf("%s/%s", profiling_dir, filename); if ((stat(buf, &sb) != 0) || !S_ISREG(sb.st_mode)) { crm_trace("Not profiling file '%s': not a regular file", filename); goto done; } rc = 1; done: free(buf); return rc; } /*! * \internal * \brief Profile the configuration updates and scheduler actions in a single * CIB file, printing the profiling timings. * * \note \p scheduler->priv->out must have been set to a valid \c pcmk__output_t * object before this function is called. 
* * \param[in] xml_file The CIB file to profile * \param[in] repeat Number of times to run * \param[in,out] scheduler Scheduler data * \param[in,out] flags Group of enum pcmk__scheduler_flags to set * in addition to defaults * \param[in] use_date The date to set the cluster's time to (may be NULL) */ static void profile_file(const char *xml_file, unsigned int repeat, pcmk_scheduler_t *scheduler, uint64_t flags, const char *use_date) { pcmk__output_t *out = scheduler->priv->out; xmlNode *cib_object = NULL; clock_t start = 0; clock_t end; pcmk__assert(out != NULL); cib_object = pcmk__xml_read(xml_file); start = clock(); if (pcmk_find_cib_element(cib_object, PCMK_XE_STATUS) == NULL) { pcmk__xe_create(cib_object, PCMK_XE_STATUS); } if (pcmk__update_configured_schema(&cib_object, false) != pcmk_rc_ok) { goto done; } if (!pcmk__validate_xml(cib_object, NULL, NULL, NULL)) { goto done; } for (int i = 0; i < repeat; ++i) { pcmk_reset_scheduler(scheduler); scheduler->input = cib_object; pcmk__set_scheduler_flags(scheduler, flags); set_effective_date(scheduler, false, use_date); pcmk__schedule_actions(scheduler); // Avoid freeing cib_object in pcmk_reset_scheduler() scheduler->input = NULL; } pcmk_reset_scheduler(scheduler); end = clock(); out->message(out, "profile", xml_file, start, end); done: pcmk__xml_free(cib_object); } int pcmk__profile_dir(pcmk__output_t *out, uint32_t flags, const char *dir, unsigned int repeat, const char *use_date) { pcmk_scheduler_t *scheduler = NULL; uint64_t scheduler_flags = pcmk__sched_none; struct dirent **namelist; int num_files = 0; int rc = pcmk_rc_ok; pcmk__assert(out != NULL); scheduler = pcmk_new_scheduler(); if (scheduler == NULL) { return ENOMEM; } scheduler->priv->out = out; if (pcmk_is_set(flags, pcmk_sim_show_scores)) { scheduler_flags |= pcmk__sched_output_scores; } if (pcmk_is_set(flags, pcmk_sim_show_utilization)) { scheduler_flags |= pcmk__sched_show_utilization; } // Hack to pass user data to profile_filter profiling_dir = dir; num_files = scandir(dir, &namelist, profile_filter, alphasort); profiling_dir = NULL; if (num_files < 0) { rc = errno; goto done; } if (num_files == 0) { goto done; } out->begin_list(out, NULL, NULL, "Timings"); for (int i = 0; i < num_files; i++) { // glibc doesn't enforce PATH_MAX, so don't limit the buffer size char *path = crm_strdup_printf("%s/%s", dir, namelist[i]->d_name); profile_file(path, repeat, scheduler, scheduler_flags, use_date); free(path); free(namelist[i]); } out->end_list(out); done: pcmk_free_scheduler(scheduler); free(namelist); return rc; } /*! * \brief Set the date of the cluster, either to the value given by * \p use_date, or to the \c PCMK_XA_EXECUTION_DATE value in the CIB. * * \note \p scheduler->priv->out must have been set to a valid \p pcmk__output_t * object before this function is called. 
* * \param[in,out] scheduler Scheduler data * \param[in] print_original If \p true, the \c PCMK_XA_EXECUTION_DATE * should also be printed * \param[in] use_date The date to set the cluster's time to * (may be NULL) */ static void set_effective_date(pcmk_scheduler_t *scheduler, bool print_original, const char *use_date) { pcmk__output_t *out = scheduler->priv->out; time_t original_date = 0; pcmk__assert(out != NULL); pcmk__xe_get_time(scheduler->input, PCMK_XA_EXECUTION_DATE, &original_date); if (use_date) { scheduler->priv->now = crm_time_new(use_date); out->info(out, "Setting effective cluster time: %s", use_date); crm_time_log(LOG_NOTICE, "Pretending 'now' is", scheduler->priv->now, crm_time_log_date | crm_time_log_timeofday); } else if (original_date != 0) { scheduler->priv->now = pcmk__copy_timet(original_date); if (print_original) { char *when = crm_time_as_string(scheduler->priv->now, crm_time_log_date |crm_time_log_timeofday); out->info(out, "Using the original execution date of: %s", when); free(when); } } } /*! * \internal * \brief Simulate successfully executing a pseudo-action in a graph * * \param[in,out] graph Graph to update with pseudo-action result * \param[in,out] action Pseudo-action to simulate executing * * \return Standard Pacemaker return code */ static int simulate_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *node = pcmk__xe_get(action->xml, PCMK__META_ON_NODE); const char *task = pcmk__xe_get(action->xml, PCMK__XA_OPERATION_KEY); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); out->message(out, "inject-pseudo-action", node, task); pcmk__update_graph(graph, action); return pcmk_rc_ok; } /*! * \internal * \brief Simulate executing a resource action in a graph * * \param[in,out] graph Graph to update with resource action result * \param[in,out] action Resource action to simulate executing * * \return Standard Pacemaker return code */ static int simulate_resource_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { int rc; lrmd_event_data_t *op = NULL; int target_outcome = PCMK_OCF_OK; const char *rtype = NULL; const char *rclass = NULL; const char *resource = NULL; const char *rprovider = NULL; const char *resource_config_name = NULL; const char *operation = pcmk__xe_get(action->xml, PCMK_XA_OPERATION); const char *target_rc_s = crm_meta_value(action->params, PCMK__META_OP_TARGET_RC); xmlNode *cib_node = NULL; xmlNode *cib_resource = NULL; xmlNode *action_rsc = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL, NULL); - char *node = crm_element_value_copy(action->xml, PCMK__META_ON_NODE); + char *node = pcmk__xe_get_copy(action->xml, PCMK__META_ON_NODE); char *uuid = NULL; const char *router_node = pcmk__xe_get(action->xml, PCMK__XA_ROUTER_NODE); // Certain actions don't need to be displayed or history entries if (pcmk__str_eq(operation, CRM_OP_REPROBE, pcmk__str_none)) { crm_debug("No history injection for %s op on %s", operation, node); goto done; // Confirm action and update graph } if (action_rsc == NULL) { // Shouldn't be possible crm_log_xml_err(action->xml, "Bad"); free(node); return EPROTO; } /* A resource might be known by different names in the configuration and in * the action (for example, a clone instance). Grab the configuration name * (which is preferred when writing history), and if necessary, the instance * name. 
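 * For example (illustrative), an anonymous clone instance known as "my-rsc" in
 * the configuration may be carried in the action's <primitive> with PCMK_XA_ID
 * "my-rsc" and PCMK__XA_LONG_ID "my-rsc:0".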
*/ resource_config_name = pcmk__xe_get(action_rsc, PCMK_XA_ID); if (resource_config_name == NULL) { // Shouldn't be possible crm_log_xml_err(action->xml, "No ID"); free(node); return EPROTO; } resource = resource_config_name; if (pe_find_resource(fake_resource_list, resource) == NULL) { const char *longname = pcmk__xe_get(action_rsc, PCMK__XA_LONG_ID); if ((longname != NULL) && (pe_find_resource(fake_resource_list, longname) != NULL)) { resource = longname; } } // Certain actions need to be displayed but don't need history entries if (pcmk__strcase_any_of(operation, PCMK_ACTION_DELETE, PCMK_ACTION_META_DATA, NULL)) { out->message(out, "inject-rsc-action", resource, operation, node, (guint) 0); goto done; // Confirm action and update graph } rclass = pcmk__xe_get(action_rsc, PCMK_XA_CLASS); rtype = pcmk__xe_get(action_rsc, PCMK_XA_TYPE); rprovider = pcmk__xe_get(action_rsc, PCMK_XA_PROVIDER); pcmk__scan_min_int(target_rc_s, &target_outcome, 0); pcmk__assert(fake_cib->cmds->query(fake_cib, NULL, NULL, cib_sync_call) == pcmk_ok); // Ensure the action node is in the CIB - uuid = crm_element_value_copy(action->xml, PCMK__META_ON_NODE_UUID); + uuid = pcmk__xe_get_copy(action->xml, PCMK__META_ON_NODE_UUID); cib_node = pcmk__inject_node(fake_cib, node, ((router_node == NULL)? uuid: node)); free(uuid); pcmk__assert(cib_node != NULL); // Add a history entry for the action cib_resource = pcmk__inject_resource_history(out, cib_node, resource, resource_config_name, rclass, rtype, rprovider); if (cib_resource == NULL) { crm_err("Could not simulate action %d history for resource %s", action->id, resource); free(node); pcmk__xml_free(cib_node); return EINVAL; } // Simulate and display an executor event for the action result op = pcmk__event_from_graph_action(cib_resource, action, PCMK_EXEC_DONE, target_outcome, "User-injected result"); out->message(out, "inject-rsc-action", resource, op->op_type, node, op->interval_ms); // Check whether action is in a list of desired simulated failures for (const GList *iter = fake_op_fail_list; iter != NULL; iter = iter->next) { const char *spec = (const char *) iter->data; char *key = NULL; const char *match_name = NULL; const char *offset = NULL; // Allow user to specify anonymous clone with or without instance number key = crm_strdup_printf(PCMK__OP_FMT "@%s=", resource, op->op_type, op->interval_ms, node); if (strncasecmp(key, spec, strlen(key)) == 0) { match_name = resource; } free(key); // If not found, try the resource's name in the configuration if ((match_name == NULL) && (strcmp(resource, resource_config_name) != 0)) { key = crm_strdup_printf(PCMK__OP_FMT "@%s=", resource_config_name, op->op_type, op->interval_ms, node); if (strncasecmp(key, spec, strlen(key)) == 0) { match_name = resource_config_name; } free(key); } if (match_name == NULL) { continue; // This failed action entry doesn't match } // ${match_name}_${task}_${interval_in_ms}@${node}=${rc} rc = sscanf(spec, "%*[^=]=%d", (int *) &op->rc); if (rc != 1) { out->err(out, "Invalid failed operation '%s' " "(result code must be integer)", spec); continue; // Keep checking other list entries } out->info(out, "Pretending action %d failed with rc=%d", action->id, op->rc); pcmk__set_graph_action_flags(action, pcmk__graph_action_failed); graph->abort_priority = PCMK_SCORE_INFINITY; if (pcmk__str_eq(op->op_type, PCMK_ACTION_START, pcmk__str_none)) { offset = pcmk__s(graph->failed_start_offset, PCMK_VALUE_INFINITY); } else if (pcmk__str_eq(op->op_type, PCMK_ACTION_STOP, pcmk__str_none)) { offset = 
pcmk__s(graph->failed_stop_offset, PCMK_VALUE_INFINITY); } pcmk__inject_failcount(out, fake_cib, cib_node, match_name, op->op_type, op->interval_ms, op->rc, pcmk_str_is_infinity(offset)); break; } pcmk__inject_action_result(cib_resource, op, node, target_outcome); lrmd_free_event(op); rc = fake_cib->cmds->modify(fake_cib, PCMK_XE_STATUS, cib_node, cib_sync_call); pcmk__assert(rc == pcmk_ok); done: free(node); pcmk__xml_free(cib_node); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); pcmk__update_graph(graph, action); return pcmk_rc_ok; } /*! * \internal * \brief Simulate successfully executing a cluster action * * \param[in,out] graph Graph to update with action result * \param[in,out] action Cluster action to simulate * * \return Standard Pacemaker return code */ static int simulate_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *node = pcmk__xe_get(action->xml, PCMK__META_ON_NODE); const char *task = pcmk__xe_get(action->xml, PCMK_XA_OPERATION); xmlNode *rsc = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL, NULL); pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); out->message(out, "inject-cluster-action", node, task, rsc); pcmk__update_graph(graph, action); return pcmk_rc_ok; } /*! * \internal * \brief Simulate successfully executing a fencing action * * \param[in,out] graph Graph to update with action result * \param[in,out] action Fencing action to simulate * * \return Standard Pacemaker return code */ static int simulate_fencing_action(pcmk__graph_t *graph, pcmk__graph_action_t *action) { const char *op = crm_meta_value(action->params, PCMK__META_STONITH_ACTION); - char *target = crm_element_value_copy(action->xml, PCMK__META_ON_NODE); + char *target = pcmk__xe_get_copy(action->xml, PCMK__META_ON_NODE); out->message(out, "inject-fencing-action", target, op); if (!pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) { int rc = pcmk_ok; GString *xpath = g_string_sized_new(512); // Set node state to offline xmlNode *cib_node = pcmk__inject_node_state_change(fake_cib, target, false); pcmk__assert(cib_node != NULL); crm_xml_add(cib_node, PCMK_XA_CRM_DEBUG_ORIGIN, __func__); rc = fake_cib->cmds->replace(fake_cib, PCMK_XE_STATUS, cib_node, cib_sync_call); pcmk__assert(rc == pcmk_ok); // Simulate controller clearing node's resource history and attributes pcmk__g_strcat(xpath, "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='", target, "']/" PCMK__XE_LRM, NULL); fake_cib->cmds->remove(fake_cib, (const char *) xpath->str, NULL, cib_xpath|cib_sync_call); g_string_truncate(xpath, 0); pcmk__g_strcat(xpath, "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='", target, "']" "/" PCMK__XE_TRANSIENT_ATTRIBUTES, NULL); fake_cib->cmds->remove(fake_cib, (const char *) xpath->str, NULL, cib_xpath|cib_sync_call); pcmk__xml_free(cib_node); g_string_free(xpath, TRUE); } pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed); pcmk__update_graph(graph, action); free(target); return pcmk_rc_ok; } enum pcmk__graph_status pcmk__simulate_transition(pcmk_scheduler_t *scheduler, cib_t *cib, const GList *op_fail_list) { pcmk__graph_t *transition = NULL; enum pcmk__graph_status graph_rc; pcmk__graph_functions_t simulation_fns = { simulate_pseudo_action, simulate_resource_action, simulate_cluster_action, simulate_fencing_action, }; out = scheduler->priv->out; fake_cib = cib; fake_op_fail_list = op_fail_list; if (!out->is_quiet(out)) { out->begin_list(out, NULL, NULL, "Executing Cluster Transition"); } 
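    /* Register the simulated executors defined above, then run the unpacked
     * graph to completion: pcmk__execute_graph() is called repeatedly for as
     * long as it reports pcmk__graph_active (actions still being initiated). */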
pcmk__set_graph_functions(&simulation_fns); transition = pcmk__unpack_graph(scheduler->priv->graph, crm_system_name); pcmk__log_graph(LOG_DEBUG, transition); fake_resource_list = scheduler->priv->resources; do { graph_rc = pcmk__execute_graph(transition); } while (graph_rc == pcmk__graph_active); fake_resource_list = NULL; if (graph_rc != pcmk__graph_complete) { out->err(out, "Transition failed: %s", pcmk__graph_status2text(graph_rc)); pcmk__log_graph(LOG_ERR, transition); out->err(out, "An invalid transition was produced"); } pcmk__free_graph(transition); if (!out->is_quiet(out)) { // If not quiet, we'll need the resulting CIB for later display xmlNode *cib_object = NULL; int rc = fake_cib->cmds->query(fake_cib, NULL, &cib_object, cib_sync_call); pcmk__assert(rc == pcmk_ok); pcmk_reset_scheduler(scheduler); scheduler->input = cib_object; out->end_list(out); } return graph_rc; } int pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out, const pcmk_injections_t *injections, uint32_t flags, uint32_t section_opts, const char *use_date, const char *input_file, const char *graph_file, const char *dot_file) { int printed = pcmk_rc_no_output; int rc = pcmk_rc_ok; xmlNodePtr input = NULL; cib_t *cib = NULL; rc = cib__signon_query(out, &cib, &input); if (rc != pcmk_rc_ok) { goto simulate_done; } reset(scheduler, input, out, use_date, flags); if (!out->is_quiet(out)) { const bool show_pending = pcmk_is_set(flags, pcmk_sim_show_pending); if (pcmk_is_set(scheduler->flags, pcmk__sched_in_maintenance)) { printed = out->message(out, "maint-mode", scheduler->flags); } if ((scheduler->priv->disabled_resources > 0) || (scheduler->priv->blocked_resources > 0)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); printed = out->info(out, "%d of %d resource instances DISABLED and " "%d BLOCKED from further action due to failure", scheduler->priv->disabled_resources, scheduler->priv->ninstances, scheduler->priv->blocked_resources); } /* Most formatted output headers use caps for each word, but this one * only has the first word capitalized for compatibility with pcs. */ print_cluster_status(scheduler, (show_pending? 
pcmk_show_pending : 0), section_opts, "Current cluster status", (printed == pcmk_rc_ok)); printed = pcmk_rc_ok; } // If the user requested any injections, handle them if ((injections->node_down != NULL) || (injections->node_fail != NULL) || (injections->node_up != NULL) || (injections->op_inject != NULL) || (injections->ticket_activate != NULL) || (injections->ticket_grant != NULL) || (injections->ticket_revoke != NULL) || (injections->ticket_standby != NULL) || (injections->watchdog != NULL)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); pcmk__inject_scheduler_input(scheduler, cib, injections); printed = pcmk_rc_ok; rc = cib->cmds->query(cib, NULL, &input, cib_sync_call); if (rc != pcmk_rc_ok) { rc = pcmk_legacy2rc(rc); goto simulate_done; } reset(scheduler, input, out, use_date, flags); } if (input_file != NULL) { rc = pcmk__xml_write_file(input, input_file, false); if (rc != pcmk_rc_ok) { goto simulate_done; } } if (pcmk_any_flags_set(flags, pcmk_sim_process | pcmk_sim_simulate)) { pcmk__output_t *logger_out = NULL; if (pcmk_all_flags_set(scheduler->flags, pcmk__sched_output_scores |pcmk__sched_show_utilization)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Assignment Scores and Utilization Information"); printed = pcmk_rc_ok; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_output_scores)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Assignment Scores"); printed = pcmk_rc_ok; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_show_utilization)) { PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Utilization Information"); printed = pcmk_rc_ok; } else { rc = pcmk__log_output_new(&logger_out); if (rc != pcmk_rc_ok) { goto simulate_done; } pe__register_messages(logger_out); pcmk__register_lib_messages(logger_out); scheduler->priv->out = logger_out; } pcmk__schedule_actions(scheduler); if (logger_out == NULL) { out->end_list(out); } else { logger_out->finish(logger_out, CRM_EX_OK, true, NULL); pcmk__output_free(logger_out); scheduler->priv->out = out; } input = NULL; /* Don't try and free it twice */ if (graph_file != NULL) { rc = pcmk__xml_write_file(scheduler->priv->graph, graph_file, false); if (rc != pcmk_rc_ok) { rc = pcmk_rc_graph_error; goto simulate_done; } } if (dot_file != NULL) { rc = write_sim_dotfile(scheduler, dot_file, pcmk_is_set(flags, pcmk_sim_all_actions), pcmk_is_set(flags, pcmk_sim_verbose)); if (rc != pcmk_rc_ok) { rc = pcmk_rc_dot_error; goto simulate_done; } } if (!out->is_quiet(out)) { print_transition_summary(scheduler, printed == pcmk_rc_ok); } } rc = pcmk_rc_ok; if (!pcmk_is_set(flags, pcmk_sim_simulate)) { goto simulate_done; } PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok); if (pcmk__simulate_transition(scheduler, cib, injections->op_fail) != pcmk__graph_complete) { rc = pcmk_rc_invalid_transition; } if (out->is_quiet(out)) { goto simulate_done; } set_effective_date(scheduler, true, use_date); if (pcmk_is_set(flags, pcmk_sim_show_scores)) { pcmk__set_scheduler_flags(scheduler, pcmk__sched_output_scores); } if (pcmk_is_set(flags, pcmk_sim_show_utilization)) { pcmk__set_scheduler_flags(scheduler, pcmk__sched_show_utilization); } cluster_status(scheduler); print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status", true); simulate_done: cib__clean_up_connection(&cib); return rc; } // @COMPAT Use uint32_t for flags int pcmk_simulate(xmlNodePtr *xml, pcmk_scheduler_t *scheduler, const pcmk_injections_t 
*injections, unsigned int flags, unsigned int section_opts, const char *use_date, const char *input_file, const char *graph_file, const char *dot_file) { pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; rc = pcmk__xml_output_new(&out, xml); if (rc != pcmk_rc_ok) { return rc; } pe__register_messages(out); pcmk__register_lib_messages(out); rc = pcmk__simulate(scheduler, out, injections, (uint32_t) flags, (uint32_t) section_opts, use_date, input_file, graph_file, dot_file); pcmk__xml_output_finish(out, pcmk_rc2exitc(rc), xml); return rc; } diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c index 4daa821126..2fa6d52596 100644 --- a/lib/pengine/bundle.c +++ b/lib/pengine/bundle.c @@ -1,2098 +1,2097 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include // bool, true, false #include #include #include #include #include #include #include enum pe__bundle_mount_flags { pe__bundle_mount_none = 0x00, // mount instance-specific subdirectory rather than source directly pe__bundle_mount_subdir = 0x01 }; typedef struct { char *source; char *target; char *options; uint32_t flags; // bitmask of pe__bundle_mount_flags } pe__bundle_mount_t; typedef struct { char *source; char *target; } pe__bundle_port_t; enum pe__container_agent { PE__CONTAINER_AGENT_UNKNOWN, PE__CONTAINER_AGENT_DOCKER, PE__CONTAINER_AGENT_PODMAN, }; #define PE__CONTAINER_AGENT_UNKNOWN_S "unknown" #define PE__CONTAINER_AGENT_DOCKER_S "docker" #define PE__CONTAINER_AGENT_PODMAN_S "podman" typedef struct pe__bundle_variant_data_s { int promoted_max; int nreplicas; int nreplicas_per_host; char *prefix; char *image; const char *ip_last; char *host_network; char *host_netmask; char *control_port; char *container_network; char *ip_range_start; gboolean add_host; gchar *container_host_options; char *container_command; char *launcher_options; const char *attribute_target; pcmk_resource_t *child; GList *replicas; // pcmk__bundle_replica_t * GList *ports; // pe__bundle_port_t * GList *mounts; // pe__bundle_mount_t * /* @TODO Maybe use a more object-oriented design instead, with a set of * methods that are different per type rather than switching on this */ enum pe__container_agent agent_type; } pe__bundle_variant_data_t; #define get_bundle_variant_data(data, rsc) do { \ pcmk__assert(pcmk__is_bundle(rsc)); \ data = rsc->priv->variant_opaque; \ } while (0) /*! * \internal * \brief Get maximum number of bundle replicas allowed to run * * \param[in] rsc Bundle or bundled resource to check * * \return Maximum replicas for bundle corresponding to \p rsc */ int pe__bundle_max(const pcmk_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->nreplicas; } /*! * \internal * \brief Get the resource inside a bundle * * \param[in] bundle Bundle to check * * \return Resource inside \p bundle if any, otherwise NULL */ pcmk_resource_t * pe__bundled_resource(const pcmk_resource_t *rsc) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true)); return bundle_data->child; } /*! 
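 * Example (illustrative): given a resource that may be a bundle replica's
 * container, look up the resource running inside it, if any:
 *
 *   const pcmk_resource_t *bundled = pe__get_rsc_in_container(container);
 *
 *   if (bundled != NULL) {
 *       // container belongs to a bundle and runs "bundled" inside it
 *   }
 *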
* \internal * \brief Get containerized resource corresponding to a given bundle container * * \param[in] instance Collective instance that might be a bundle container * * \return Bundled resource instance inside \p instance if it is a bundle * container instance, otherwise NULL */ const pcmk_resource_t * pe__get_rsc_in_container(const pcmk_resource_t *instance) { const pe__bundle_variant_data_t *data = NULL; const pcmk_resource_t *top = pe__const_top_resource(instance, true); if (!pcmk__is_bundle(top)) { return NULL; } get_bundle_variant_data(data, top); for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) { const pcmk__bundle_replica_t *replica = iter->data; if (instance == replica->container) { return replica->child; } } return NULL; } /*! * \internal * \brief Check whether a given node is created by a bundle * * \param[in] bundle Bundle resource to check * \param[in] node Node to check * * \return true if \p node is an instance of \p bundle, otherwise false */ bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle, const pcmk_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; if (pcmk__same_node(node, replica->node)) { return true; } } return false; } /*! * \internal * \brief Get the container of a bundle's first replica * * \param[in] bundle Bundle resource to get container for * * \return Container resource from first replica of \p bundle if any, * otherwise NULL */ pcmk_resource_t * pe__first_container(const pcmk_resource_t *bundle) { const pe__bundle_variant_data_t *bundle_data = NULL; const pcmk__bundle_replica_t *replica = NULL; get_bundle_variant_data(bundle_data, bundle); if (bundle_data->replicas == NULL) { return NULL; } replica = bundle_data->replicas->data; return replica->container; } /*! * \internal * \brief Iterate over bundle replicas * * \param[in,out] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_bundle_replica(pcmk_resource_t *bundle, bool (*fn)(pcmk__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((pcmk__bundle_replica_t *) iter->data, user_data)) { break; } } } /*! 
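 * Example (illustrative sketch; find_on_node is a hypothetical callback):
 * stop iterating at the first replica whose node matches a given node:
 *
 *   static bool
 *   find_on_node(const pcmk__bundle_replica_t *replica, void *user_data)
 *   {
 *       const pcmk_node_t *node = user_data;
 *
 *       // Returning false stops the iteration
 *       return !pcmk__same_node(node, replica->node);
 *   }
 *
 *   pe__foreach_const_bundle_replica(bundle, find_on_node, node);
 *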
* \internal * \brief Iterate over const bundle replicas * * \param[in] bundle Bundle to iterate over * \param[in] fn Function to call for each replica (its return value * indicates whether to continue iterating) * \param[in,out] user_data Pointer to pass to \p fn */ void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle, bool (*fn)(const pcmk__bundle_replica_t *, void *), void *user_data) { const pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, bundle); for (const GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) { if (!fn((const pcmk__bundle_replica_t *) iter->data, user_data)) { break; } } } static char * next_ip(const char *last_ip) { unsigned int oct1 = 0; unsigned int oct2 = 0; unsigned int oct3 = 0; unsigned int oct4 = 0; int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4); if (rc != 4) { /*@ TODO check for IPv6 */ return NULL; } else if (oct3 > 253) { return NULL; } else if (oct4 > 253) { ++oct3; oct4 = 1; } else { ++oct4; } return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4); } static void allocate_ip(pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica, GString *buffer) { if(data->ip_range_start == NULL) { return; } else if(data->ip_last) { replica->ipaddr = next_ip(data->ip_last); } else { replica->ipaddr = strdup(data->ip_range_start); } data->ip_last = replica->ipaddr; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (data->add_host) { g_string_append_printf(buffer, " --add-host=%s-%d:%s", data->prefix, replica->offset, replica->ipaddr); } else { g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d", replica->ipaddr, data->prefix, replica->offset); } break; default: // PE__CONTAINER_AGENT_UNKNOWN break; } } static xmlNode * create_resource(const char *name, const char *provider, const char *kind) { xmlNode *rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE); crm_xml_add(rsc, PCMK_XA_ID, name); crm_xml_add(rsc, PCMK_XA_CLASS, PCMK_RESOURCE_CLASS_OCF); crm_xml_add(rsc, PCMK_XA_PROVIDER, provider); crm_xml_add(rsc, PCMK_XA_TYPE, kind); return rsc; } /*! * \internal * \brief Check whether cluster can manage resource inside container * * \param[in,out] data Container variant data * * \return TRUE if networking configuration is acceptable, FALSE otherwise * * \note The resource is manageable if an IP range or control port has been * specified. If a control port is used without an IP range, replicas per * host must be 1. 
*/ static bool valid_network(pe__bundle_variant_data_t *data) { if(data->ip_range_start) { return TRUE; } if(data->control_port) { if(data->nreplicas_per_host > 1) { pcmk__config_err("Specifying the '" PCMK_XA_CONTROL_PORT "' for %s " "requires '" PCMK_XA_REPLICAS_PER_HOST "=1'", data->prefix); data->nreplicas_per_host = 1; // @TODO to be sure: // pcmk__clear_rsc_flags(rsc, pcmk__rsc_unique); } return TRUE; } return FALSE; } static int create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { if(data->ip_range_start) { char *id = NULL; xmlNode *xml_ip = NULL; xmlNode *xml_obj = NULL; id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr); pcmk__xml_sanitize_id(id); xml_ip = create_resource(id, "heartbeat", "IPaddr2"); free(id); xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_INSTANCE_ATTRIBUTES); pcmk__xe_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr); if(data->host_network) { crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network); } if(data->host_netmask) { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", data->host_netmask); } else { crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32"); } xml_obj = pcmk__xe_create(xml_ip, PCMK_XE_OPERATIONS); crm_create_op_xml(xml_obj, pcmk__xe_id(xml_ip), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_ip, &replica->ip, parent, parent->priv->scheduler) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } parent->priv->children = g_list_append(parent->priv->children, replica->ip); } return pcmk_rc_ok; } static const char* container_agent_str(enum pe__container_agent t) { switch (t) { case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S; case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S; default: // PE__CONTAINER_AGENT_UNKNOWN break; } return PE__CONTAINER_AGENT_UNKNOWN_S; } static int create_container_resource(pcmk_resource_t *parent, const pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { char *id = NULL; xmlNode *xml_container = NULL; xmlNode *xml_obj = NULL; // Agent-specific const char *hostname_opt = NULL; const char *env_opt = NULL; const char *agent_str = NULL; GString *buffer = NULL; GString *dbuffer = NULL; // Where syntax differences are drop-in replacements, set them now switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: hostname_opt = "-h "; env_opt = "-e "; break; default: // PE__CONTAINER_AGENT_UNKNOWN return pcmk_rc_unpack_error; } agent_str = container_agent_str(data->agent_type); buffer = g_string_sized_new(4096); id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str, replica->offset); pcmk__xml_sanitize_id(id); xml_container = create_resource(id, "heartbeat", agent_str); free(id); xml_obj = pcmk__xe_create(xml_container, PCMK_XE_INSTANCE_ATTRIBUTES); pcmk__xe_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset); crm_create_nvpair_xml(xml_obj, NULL, "image", data->image); crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", PCMK_VALUE_TRUE); crm_create_nvpair_xml(xml_obj, NULL, "force_kill", PCMK_VALUE_FALSE); crm_create_nvpair_xml(xml_obj, NULL, "reuse", PCMK_VALUE_FALSE); if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) { g_string_append(buffer, " --restart=no"); } /* Set a container hostname only if we have an IP to map it to. 
The user can * set -h or --uts=host themselves if they want a nicer name for logs, but * this makes applications happy who need their hostname to match the IP * they bind to. */ if (data->ip_range_start != NULL) { g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix, replica->offset); } pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL); if (data->container_network != NULL) { pcmk__g_strcat(buffer, " --net=", data->container_network, NULL); } if (data->control_port != NULL) { pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=", data->control_port, NULL); } else { g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d", env_opt, DEFAULT_REMOTE_PORT); } for (GList *iter = data->mounts; iter != NULL; iter = iter->next) { pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data; char *source = NULL; if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) { source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix, replica->offset); pcmk__add_separated_word(&dbuffer, 1024, source, ","); } switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: pcmk__g_strcat(buffer, " -v ", pcmk__s(source, mount->source), ":", mount->target, NULL); if (mount->options != NULL) { pcmk__g_strcat(buffer, ":", mount->options, NULL); } break; default: break; } free(source); } for (GList *iter = data->ports; iter != NULL; iter = iter->next) { pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data; switch (data->agent_type) { case PE__CONTAINER_AGENT_DOCKER: case PE__CONTAINER_AGENT_PODMAN: if (replica->ipaddr != NULL) { pcmk__g_strcat(buffer, " -p ", replica->ipaddr, ":", port->source, ":", port->target, NULL); } else if (!pcmk__str_eq(data->container_network, PCMK_VALUE_HOST, pcmk__str_none)) { // No need to do port mapping if net == host pcmk__g_strcat(buffer, " -p ", port->source, ":", port->target, NULL); } break; default: break; } } /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because * it would cause restarts during rolling upgrades. * * In a previous version of the container resource creation logic, if * data->launcher_options is not NULL, we append * (" %s", data->launcher_options) even if data->launcher_options is an * empty string. Likewise for data->container_host_options. Using * * pcmk__add_word(buffer, 0, data->launcher_options) * * removes that extra trailing space, causing a resource definition change. */ if (data->launcher_options != NULL) { pcmk__g_strcat(buffer, " ", data->launcher_options, NULL); } if (data->container_host_options != NULL) { pcmk__g_strcat(buffer, " ", data->container_host_options, NULL); } crm_create_nvpair_xml(xml_obj, NULL, "run_opts", (const char *) buffer->str); g_string_free(buffer, TRUE); crm_create_nvpair_xml(xml_obj, NULL, "mount_points", (dbuffer != NULL)? (const char *) dbuffer->str : ""); if (dbuffer != NULL) { g_string_free(dbuffer, TRUE); } if (replica->child != NULL) { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } else { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR "/" PCMK__SERVER_REMOTED); } /* TODO: Allow users to specify their own? * * We just want to know if the container is alive; we'll monitor the * child independently. 
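* (Hence monitor_cmd is set to /bin/true just below: the container monitor only needs to confirm that the container itself is running.)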
*/ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); #if 0 /* @TODO Consider supporting the use case where we can start and stop * resources, but not proxy local commands (such as setting node * attributes), by running the local executor in stand-alone mode. * However, this would probably be better done via ACLs as with other * Pacemaker Remote nodes. */ } else if ((child != NULL) && data->untrusted) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", CRM_DAEMON_DIR "/" PCMK__SERVER_EXECD); crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke"); #endif } else { if (data->container_command != NULL) { crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->container_command); } /* TODO: Allow users to specify their own? * * We don't know what's in the container, so we just want to know if it * is alive. */ crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true"); } xml_obj = pcmk__xe_create(xml_container, PCMK_XE_OPERATIONS); crm_create_op_xml(xml_obj, pcmk__xe_id(xml_container), PCMK_ACTION_MONITOR, "60s", NULL); // TODO: Other ops? Timeouts and intervals from underlying resource? if (pe__unpack_resource(xml_container, &replica->container, parent, parent->priv->scheduler) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } pcmk__set_rsc_flags(replica->container, pcmk__rsc_replica_container); parent->priv->children = g_list_append(parent->priv->children, replica->container); return pcmk_rc_ok; } /*! * \brief Ban a node from a resource's (and its children's) allowed nodes list * * \param[in,out] rsc Resource to modify * \param[in] uname Name of node to ban */ static void disallow_node(pcmk_resource_t *rsc, const char *uname) { gpointer match = g_hash_table_lookup(rsc->priv->allowed_nodes, uname); if (match) { ((pcmk_node_t *) match)->assign->score = -PCMK_SCORE_INFINITY; ((pcmk_node_t *) match)->assign->probe_mode = pcmk__probe_never; } g_list_foreach(rsc->priv->children, (GFunc) disallow_node, (gpointer) uname); } static int create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { if (replica->child && valid_network(data)) { GHashTableIter gIter; pcmk_node_t *node = NULL; xmlNode *xml_remote = NULL; char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset); char *port_s = NULL; const char *uname = NULL; const char *connect_name = NULL; pcmk_scheduler_t *scheduler = parent->priv->scheduler; if (pe_find_resource(scheduler->priv->resources, id) != NULL) { free(id); // The biggest hammer we have id = crm_strdup_printf("pcmk-internal-%s-remote-%d", replica->child->id, replica->offset); //@TODO return error instead of asserting? pcmk__assert(pe_find_resource(scheduler->priv->resources, id) == NULL); } /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the * connection does not have its own IP is a magic string that we use to * support nested remotes (i.e. a bundle running on a remote node). */ connect_name = (replica->ipaddr? replica->ipaddr : "#uname"); if (data->control_port == NULL) { port_s = pcmk__itoa(DEFAULT_REMOTE_PORT); } /* This sets replica->container as replica->remote's container, which is * similar to what happens with guest nodes. This is how the scheduler * knows that the bundle node is fenced by recovering the container, and * that remote should be ordered relative to the container. */ xml_remote = pe_create_remote_xml(NULL, id, replica->container->id, NULL, NULL, NULL, connect_name, (data->control_port? 
data->control_port : port_s)); free(port_s); /* Abandon our created ID, and pull the copy from the XML, because we * need something that will get freed during scheduler data cleanup to * use as the node ID and uname. */ free(id); id = NULL; uname = pcmk__xe_id(xml_remote); /* Ensure a node has been created for the guest (it may have already * been, if it has a permanent node attribute), and ensure its weight is * -INFINITY so no other resources can run on it. */ node = pcmk_find_node(scheduler, uname); if (node == NULL) { node = pe_create_node(uname, uname, PCMK_VALUE_REMOTE, -PCMK_SCORE_INFINITY, scheduler); } else { node->assign->score = -PCMK_SCORE_INFINITY; } node->assign->probe_mode = pcmk__probe_never; /* unpack_remote_nodes() ensures that each remote node and guest node * has a pcmk_node_t entry. Ideally, it would do the same for bundle * nodes. Unfortunately, a bundle has to be mostly unpacked before it's * obvious what nodes will be needed, so we do it just above. * * Worse, that means that the node may have been utilized while * unpacking other resources, without our weight correction. The most * likely place for this to happen is when pe__unpack_resource() calls * resource_location() to set a default score in symmetric clusters. * This adds a node *copy* to each resource's allowed nodes, and these * copies will have the wrong weight. * * As a hacky workaround, fix those copies here. * * @TODO Possible alternative: ensure bundles are unpacked before other * resources, so the weight is correct before any copies are made. */ g_list_foreach(scheduler->priv->resources, (GFunc) disallow_node, (gpointer) uname); replica->node = pe__copy_node(node); replica->node->assign->score = 500; replica->node->assign->probe_mode = pcmk__probe_exclusive; /* Ensure the node shows up as allowed and with the correct discovery set */ if (replica->child->priv->allowed_nodes != NULL) { g_hash_table_destroy(replica->child->priv->allowed_nodes); } replica->child->priv->allowed_nodes = pcmk__strkey_table(NULL, pcmk__free_node_copy); g_hash_table_insert(replica->child->priv->allowed_nodes, (gpointer) replica->node->priv->id, pe__copy_node(replica->node)); { const pcmk_resource_t *parent = replica->child->priv->parent; pcmk_node_t *copy = pe__copy_node(replica->node); copy->assign->score = -PCMK_SCORE_INFINITY; g_hash_table_insert(parent->priv->allowed_nodes, (gpointer) replica->node->priv->id, copy); } if (pe__unpack_resource(xml_remote, &replica->remote, parent, scheduler) != pcmk_rc_ok) { return pcmk_rc_unpack_error; } g_hash_table_iter_init(&gIter, replica->remote->priv->allowed_nodes); while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) { if (pcmk__is_pacemaker_remote_node(node)) { /* Remote resources can only run on 'normal' cluster node */ node->assign->score = -PCMK_SCORE_INFINITY; } } replica->node->priv->remote = replica->remote; // Ensure pcmk__is_guest_or_bundle_node() functions correctly replica->remote->priv->launcher = replica->container; /* A bundle's #kind is closer to "container" (guest node) than the * "remote" set by pe_create_node(). */ pcmk__insert_dup(replica->node->priv->attrs, CRM_ATTR_KIND, "container"); /* One effect of this is that unpack_launcher() will add * replica->remote to replica->container's launched resources, which * will make pe__resource_contains_guest_node() true for * replica->container. * * replica->child does NOT get added to replica->container's launched * resources. 
The only noticeable effect if it did would be for its * fail count to be taken into account when checking * replica->container's migration threshold. */ parent->priv->children = g_list_append(parent->priv->children, replica->remote); } return pcmk_rc_ok; } static int create_replica_resources(pcmk_resource_t *parent, pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica) { int rc = pcmk_rc_ok; rc = create_container_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_ip_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } rc = create_remote_resource(parent, data, replica); if (rc != pcmk_rc_ok) { return rc; } if ((replica->child != NULL) && (replica->ipaddr != NULL)) { pcmk__insert_meta(replica->child->priv, "external-ip", replica->ipaddr); } if (replica->remote != NULL) { /* * Allow the remote connection resource to be allocated to a * different node than the one on which the container is active. * * This makes it possible to have Pacemaker Remote nodes running * containers with the remote executor inside in order to start * services inside those containers. */ pcmk__set_rsc_flags(replica->remote, pcmk__rsc_remote_nesting_allowed); } return rc; } static void mount_add(pe__bundle_variant_data_t *bundle_data, const char *source, const char *target, const char *options, uint32_t flags) { pe__bundle_mount_t *mount = pcmk__assert_alloc(1, sizeof(pe__bundle_mount_t)); mount->source = pcmk__str_copy(source); mount->target = pcmk__str_copy(target); mount->options = pcmk__str_copy(options); mount->flags = flags; bundle_data->mounts = g_list_append(bundle_data->mounts, mount); } static void mount_free(pe__bundle_mount_t *mount) { free(mount->source); free(mount->target); free(mount->options); free(mount); } static void port_free(pe__bundle_port_t *port) { free(port->source); free(port->target); free(port); } static pcmk__bundle_replica_t * replica_for_remote(pcmk_resource_t *remote) { pcmk_resource_t *top = remote; pe__bundle_variant_data_t *bundle_data = NULL; if (top == NULL) { return NULL; } while (top->priv->parent != NULL) { top = top->priv->parent; } get_bundle_variant_data(bundle_data, top); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; if (replica->remote == remote) { return replica; } } CRM_LOG_ASSERT(FALSE); return NULL; } bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc) { const char *value; GHashTable *params = NULL; if (rsc == NULL) { return false; } // Use NULL node since pcmk__bundle_expand() uses that to set value params = pe_rsc_params(rsc, NULL, rsc->priv->scheduler); value = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR); return pcmk__str_eq(value, "#uname", pcmk__str_casei) && xml_contains_remote_node(rsc->priv->xml); } const char * pe__add_bundle_remote_name(pcmk_resource_t *rsc, xmlNode *xml, const char *field) { // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside pcmk_node_t *node = NULL; pcmk__bundle_replica_t *replica = NULL; if (!pe__bundle_needs_remote_name(rsc)) { return NULL; } replica = replica_for_remote(rsc); if (replica == NULL) { return NULL; } node = replica->container->priv->assigned_node; if (node == NULL) { /* If it won't be running anywhere after the * transition, go with where it's running now. 
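* (That is, fall back to the container's current node so an address can still be derived for the bundle connection.)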
*/ node = pcmk__current_node(replica->container); } if(node == NULL) { crm_trace("Cannot determine address for bundle connection %s", rsc->id); return NULL; } crm_trace("Setting address for bundle connection %s to bundle host %s", rsc->id, pcmk__node_name(node)); if(xml != NULL && field != NULL) { crm_xml_add(xml, field, node->priv->name); } return node->priv->name; } #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do { \ flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "Bundle mount", pcmk__xe_id(mount_xml), \ flags, (flags_to_set), #flags_to_set); \ } while (0) bool pe__unpack_bundle(pcmk_resource_t *rsc) { const char *value = NULL; xmlNode *xml_obj = NULL; const xmlNode *xml_child = NULL; xmlNode *xml_resource = NULL; pe__bundle_variant_data_t *bundle_data = NULL; bool need_log_mount = TRUE; pcmk__assert(rsc != NULL); pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id); bundle_data = pcmk__assert_alloc(1, sizeof(pe__bundle_variant_data_t)); rsc->priv->variant_opaque = bundle_data; bundle_data->prefix = strdup(rsc->id); xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_DOCKER, NULL, NULL); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER; } if (xml_obj == NULL) { xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_PODMAN, NULL, NULL); if (xml_obj != NULL) { bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN; } } if (xml_obj == NULL) { return FALSE; } // Use 0 for default, minimum, and invalid PCMK_XA_PROMOTED_MAX value = pcmk__xe_get(xml_obj, PCMK_XA_PROMOTED_MAX); pcmk__scan_min_int(value, &bundle_data->promoted_max, 0); /* Default replicas to PCMK_XA_PROMOTED_MAX if it was specified and 1 * otherwise */ value = pcmk__xe_get(xml_obj, PCMK_XA_REPLICAS); if ((value == NULL) && (bundle_data->promoted_max > 0)) { bundle_data->nreplicas = bundle_data->promoted_max; } else { pcmk__scan_min_int(value, &bundle_data->nreplicas, 1); } /* * Communication between containers on the same host via the * floating IPs only works if the container is started with: * --userland-proxy=false --ip-masq=false */ value = pcmk__xe_get(xml_obj, PCMK_XA_REPLICAS_PER_HOST); pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1); if (bundle_data->nreplicas_per_host == 1) { pcmk__clear_rsc_flags(rsc, pcmk__rsc_unique); } - bundle_data->container_command = - crm_element_value_copy(xml_obj, PCMK_XA_RUN_COMMAND); - bundle_data->launcher_options = crm_element_value_copy(xml_obj, - PCMK_XA_OPTIONS); - bundle_data->image = crm_element_value_copy(xml_obj, PCMK_XA_IMAGE); - bundle_data->container_network = crm_element_value_copy(xml_obj, - PCMK_XA_NETWORK); + bundle_data->container_command = pcmk__xe_get_copy(xml_obj, + PCMK_XA_RUN_COMMAND); + bundle_data->launcher_options = pcmk__xe_get_copy(xml_obj, PCMK_XA_OPTIONS); + bundle_data->image = pcmk__xe_get_copy(xml_obj, PCMK_XA_IMAGE); + bundle_data->container_network = pcmk__xe_get_copy(xml_obj, + PCMK_XA_NETWORK); xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_NETWORK, NULL, NULL); if(xml_obj) { - bundle_data->ip_range_start = - crm_element_value_copy(xml_obj, PCMK_XA_IP_RANGE_START); - bundle_data->host_netmask = - crm_element_value_copy(xml_obj, PCMK_XA_HOST_NETMASK); - bundle_data->host_network = - crm_element_value_copy(xml_obj, PCMK_XA_HOST_INTERFACE); - bundle_data->control_port = - crm_element_value_copy(xml_obj, PCMK_XA_CONTROL_PORT); + bundle_data->ip_range_start = pcmk__xe_get_copy(xml_obj, + PCMK_XA_IP_RANGE_START); + bundle_data->host_netmask = pcmk__xe_get_copy(xml_obj, + 
PCMK_XA_HOST_NETMASK); + bundle_data->host_network = pcmk__xe_get_copy(xml_obj, + PCMK_XA_HOST_INTERFACE); + bundle_data->control_port = pcmk__xe_get_copy(xml_obj, + PCMK_XA_CONTROL_PORT); value = pcmk__xe_get(xml_obj, PCMK_XA_ADD_HOST); if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) { bundle_data->add_host = TRUE; } for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_PORT_MAPPING, NULL, NULL); xml_child != NULL; xml_child = pcmk__xe_next(xml_child, PCMK_XE_PORT_MAPPING)) { pe__bundle_port_t *port = pcmk__assert_alloc(1, sizeof(pe__bundle_port_t)); - port->source = crm_element_value_copy(xml_child, PCMK_XA_PORT); + port->source = pcmk__xe_get_copy(xml_child, PCMK_XA_PORT); if(port->source == NULL) { - port->source = crm_element_value_copy(xml_child, PCMK_XA_RANGE); + port->source = pcmk__xe_get_copy(xml_child, PCMK_XA_RANGE); } else { - port->target = crm_element_value_copy(xml_child, - PCMK_XA_INTERNAL_PORT); + port->target = pcmk__xe_get_copy(xml_child, + PCMK_XA_INTERNAL_PORT); } if(port->source != NULL && strlen(port->source) > 0) { if(port->target == NULL) { port->target = strdup(port->source); } bundle_data->ports = g_list_append(bundle_data->ports, port); } else { pcmk__config_err("Invalid " PCMK_XA_PORT " directive %s", pcmk__xe_id(xml_child)); port_free(port); } } } xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_STORAGE, NULL, NULL); for (xml_child = pcmk__xe_first_child(xml_obj, PCMK_XE_STORAGE_MAPPING, NULL, NULL); xml_child != NULL; xml_child = pcmk__xe_next(xml_child, PCMK_XE_STORAGE_MAPPING)) { const char *source = pcmk__xe_get(xml_child, PCMK_XA_SOURCE_DIR); const char *target = pcmk__xe_get(xml_child, PCMK_XA_TARGET_DIR); const char *options = pcmk__xe_get(xml_child, PCMK_XA_OPTIONS); int flags = pe__bundle_mount_none; if (source == NULL) { source = pcmk__xe_get(xml_child, PCMK_XA_SOURCE_DIR_ROOT); pe__set_bundle_mount_flags(xml_child, flags, pe__bundle_mount_subdir); } if (source && target) { mount_add(bundle_data, source, target, options, flags); if (strcmp(target, "/var/log") == 0) { need_log_mount = FALSE; } } else { pcmk__config_err("Invalid mount directive %s", pcmk__xe_id(xml_child)); } } xml_obj = pcmk__xe_first_child(rsc->priv->xml, PCMK_XE_PRIMITIVE, NULL, NULL); if (xml_obj && valid_network(bundle_data)) { const char *suffix = NULL; char *value = NULL; xmlNode *xml_set = NULL; xml_resource = pcmk__xe_create(NULL, PCMK_XE_CLONE); /* @COMPAT We no longer use the tag, but we need to keep it as * part of the resource name, so that bundles don't restart in a rolling * upgrade. (It also avoids needing to change regression tests.) 
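* As a result, the implicit clone's XML ID set below is "<prefix>-master" when promoted_max is nonzero and "<prefix>-clone" (the element name) otherwise.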
*/ suffix = (const char *) xml_resource->name; if (bundle_data->promoted_max > 0) { suffix = "master"; } pcmk__xe_set_id(xml_resource, "%s-%s", bundle_data->prefix, suffix); xml_set = pcmk__xe_create(xml_resource, PCMK_XE_META_ATTRIBUTES); pcmk__xe_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_ORDERED, PCMK_VALUE_TRUE); value = pcmk__itoa(bundle_data->nreplicas); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value); free(value); value = pcmk__itoa(bundle_data->nreplicas_per_host); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value); free(value); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_GLOBALLY_UNIQUE, pcmk__btoa(bundle_data->nreplicas_per_host > 1)); if (bundle_data->promoted_max) { crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTABLE, PCMK_VALUE_TRUE); value = pcmk__itoa(bundle_data->promoted_max); crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value); free(value); } //crm_xml_add(xml_obj, PCMK_XA_ID, bundle_data->prefix); pcmk__xml_copy(xml_resource, xml_obj); } else if(xml_obj) { pcmk__config_err("Cannot control %s inside %s without either " PCMK_XA_IP_RANGE_START " or " PCMK_XA_CONTROL_PORT, rsc->id, pcmk__xe_id(xml_obj)); return FALSE; } if(xml_resource) { int lpc = 0; GList *childIter = NULL; pe__bundle_port_t *port = NULL; GString *buffer = NULL; if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc, rsc->priv->scheduler) != pcmk_rc_ok) { return FALSE; } /* Currently, we always map the default authentication key location * into the same location inside the container. * * Ideally, we would respect the host's PCMK_authkey_location, but: * - it may be different on different nodes; * - the actual connection will do extra checking to make sure the key * file exists and is readable, that we can't do here on the DC * - tools such as crm_resource and crm_simulate may not have the same * environment variables as the cluster, causing operation digests to * differ * * Always using the default location inside the container is fine, * because we control the pacemaker_remote environment, and it avoids * having to pass another environment variable to the container. * * @TODO A better solution may be to have only pacemaker_remote use the * environment variable, and have the cluster nodes use a new * cluster option for key location. This would introduce the limitation * of the location being the same on all cluster nodes, but that's * reasonable. */ mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION, DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none); if (need_log_mount) { mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL, pe__bundle_mount_subdir); } port = pcmk__assert_alloc(1, sizeof(pe__bundle_port_t)); if(bundle_data->control_port) { port->source = strdup(bundle_data->control_port); } else { /* If we wanted to respect PCMK_remote_port, we could use * crm_default_remote_port() here and elsewhere in this file instead * of DEFAULT_REMOTE_PORT. * * However, it gains nothing, since we control both the container * environment and the connection resource parameters, and the user * can use a different port if desired by setting * PCMK_XA_CONTROL_PORT. 
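* (PCMK_XA_CONTROL_PORT is the attribute read from the bundle's PCMK_XE_NETWORK element earlier in this function.)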
*/ port->source = pcmk__itoa(DEFAULT_REMOTE_PORT); } port->target = strdup(port->source); bundle_data->ports = g_list_append(bundle_data->ports, port); buffer = g_string_sized_new(1024); for (childIter = bundle_data->child->priv->children; childIter != NULL; childIter = childIter->next) { pcmk__bundle_replica_t *replica = NULL; replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t)); replica->child = childIter->data; pcmk__set_rsc_flags(replica->child, pcmk__rsc_exclusive_probes); replica->offset = lpc++; // Ensure the child's notify gets set based on the underlying primitive's value if (pcmk_is_set(replica->child->flags, pcmk__rsc_notify)) { pcmk__set_rsc_flags(bundle_data->child, pcmk__rsc_notify); } allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); bundle_data->attribute_target = g_hash_table_lookup(replica->child->priv->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET); } bundle_data->container_host_options = g_string_free(buffer, FALSE); if (bundle_data->attribute_target) { pcmk__insert_dup(rsc->priv->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET, bundle_data->attribute_target); pcmk__insert_dup(bundle_data->child->priv->meta, PCMK_META_CONTAINER_ATTRIBUTE_TARGET, bundle_data->attribute_target); } } else { // Just a naked container, no pacemaker-remote GString *buffer = g_string_sized_new(1024); for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) { pcmk__bundle_replica_t *replica = NULL; replica = pcmk__assert_alloc(1, sizeof(pcmk__bundle_replica_t)); replica->offset = lpc; allocate_ip(bundle_data, replica, buffer); bundle_data->replicas = g_list_append(bundle_data->replicas, replica); } bundle_data->container_host_options = g_string_free(buffer, FALSE); } for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) { pcmk__config_err("Failed unpacking resource %s", rsc->id); pcmk__free_resource(rsc); return FALSE; } /* Utilization needs special handling for bundles. It makes no sense for * the inner primitive to have utilization, because it is tied * one-to-one to the guest node created by the container resource -- and * there's no way to set capacities for that guest node anyway. * * What the user really wants is to configure utilization for the * container. However, the schema only allows utilization for * primitives, and the container resource is implicit anyway, so the * user can *only* configure utilization for the inner primitive. If * they do, move the primitive's utilization values to the container. * * @TODO This means that bundles without an inner primitive can't have * utilization. An alternative might be to allow utilization values in * the top-level bundle XML in the schema, and copy those to each * container. 
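* For now, the code below simply swaps the inner primitive's utilization table with the container's (initially empty) one.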
*/ if (replica->child != NULL) { GHashTable *empty = replica->container->priv->utilization; replica->container->priv->utilization = replica->child->priv->utilization; replica->child->priv->utilization = empty; } } if (bundle_data->child) { rsc->priv->children = g_list_append(rsc->priv->children, bundle_data->child); } return TRUE; } static int replica_resource_active(pcmk_resource_t *rsc, gboolean all) { if (rsc) { gboolean child_active = rsc->priv->fns->active(rsc, all); if (child_active && !all) { return TRUE; } else if (!child_active && all) { return FALSE; } } return -1; } bool pe__bundle_active(const pcmk_resource_t *rsc, bool all) { pe__bundle_variant_data_t *bundle_data = NULL; GList *iter = NULL; get_bundle_variant_data(bundle_data, rsc); for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; int rsc_active; rsc_active = replica_resource_active(replica->ip, all); if (rsc_active >= 0) { return (bool) rsc_active; } rsc_active = replica_resource_active(replica->child, all); if (rsc_active >= 0) { return (bool) rsc_active; } rsc_active = replica_resource_active(replica->container, all); if (rsc_active >= 0) { return (bool) rsc_active; } rsc_active = replica_resource_active(replica->remote, all); if (rsc_active >= 0) { return (bool) rsc_active; } } /* If "all" is TRUE, we've already checked that no resources were inactive, * so return TRUE; if "all" is FALSE, we didn't find any active resources, * so return FALSE. */ return all; } /*! * \internal * \brief Find the bundle replica corresponding to a given node * * \param[in] bundle Top-level bundle resource * \param[in] node Node to search for * * \return Bundle replica if found, NULL otherwise */ pcmk_resource_t * pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node) { pe__bundle_variant_data_t *bundle_data = NULL; pcmk__assert((bundle != NULL) && (node != NULL)); get_bundle_variant_data(bundle_data, bundle); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk__assert((replica != NULL) && (replica->node != NULL)); if (pcmk__same_node(replica->node, node)) { return replica->child; } } return NULL; } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_xml(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; gboolean printed_header = FALSE; bool print_everything = true; const char *desc = NULL; pcmk__assert(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->priv->fns->is_filtered(rsc, only_rsc, true)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; char *id = NULL; gboolean print_ip, print_child, print_ctnr, print_remote; pcmk__assert(replica != NULL); if (pcmk__rsc_filtered_by_node(container, only_node)) { continue; } print_ip = (ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, print_everything); print_child = (child != NULL) && 
!child->priv->fns->is_filtered(child, only_rsc, print_everything); print_ctnr = !container->priv->fns->is_filtered(container, only_rsc, print_everything); print_remote = (remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, print_everything); if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) { continue; } if (!printed_header) { const char *type = container_agent_str(bundle_data->agent_type); const char *unique = pcmk__flag_text(rsc->flags, pcmk__rsc_unique); const char *maintenance = pcmk__flag_text(rsc->flags, pcmk__rsc_maintenance); const char *managed = pcmk__flag_text(rsc->flags, pcmk__rsc_managed); const char *failed = pcmk__flag_text(rsc->flags, pcmk__rsc_failed); printed_header = TRUE; desc = pe__resource_description(rsc, show_opts); rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_BUNDLE, PCMK_XA_ID, rsc->id, PCMK_XA_TYPE, type, PCMK_XA_IMAGE, bundle_data->image, PCMK_XA_UNIQUE, unique, PCMK_XA_MAINTENANCE, maintenance, PCMK_XA_MANAGED, managed, PCMK_XA_FAILED, failed, PCMK_XA_DESCRIPTION, desc, NULL); pcmk__assert(rc == pcmk_rc_ok); } id = pcmk__itoa(replica->offset); rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_REPLICA, PCMK_XA_ID, id, NULL); free(id); pcmk__assert(rc == pcmk_rc_ok); if (print_ip) { out->message(out, (const char *) ip->priv->xml->name, show_opts, ip, only_node, only_rsc); } if (print_child) { out->message(out, (const char *) child->priv->xml->name, show_opts, child, only_node, only_rsc); } if (print_ctnr) { out->message(out, (const char *) container->priv->xml->name, show_opts, container, only_node, only_rsc); } if (print_remote) { out->message(out, (const char *) remote->priv->xml->name, show_opts, remote, only_node, only_rsc); } pcmk__output_xml_pop_parent(out); // replica } if (printed_header) { pcmk__output_xml_pop_parent(out); // bundle } return rc; } static void pe__bundle_replica_output_html(pcmk__output_t *out, pcmk__bundle_replica_t *replica, pcmk_node_t *node, uint32_t show_opts) { pcmk_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_html(out, rsc, buffer, node, show_opts); } /*! 
* \internal * \brief Get a string describing a resource's unmanaged state or lack thereof * * \param[in] rsc Resource to describe * * \return A string indicating that a resource is in maintenance mode or * otherwise unmanaged, or an empty string otherwise */ static const char * get_unmanaged_str(const pcmk_resource_t *rsc) { if (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)) { return " (maintenance)"; } if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) { return " (unmanaged)"; } return ""; } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_html(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; bool print_everything = true; pcmk__assert(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); desc = pe__resource_description(rsc, show_opts); if (rsc->priv->fns->is_filtered(rsc, only_rsc, true)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; gboolean print_ip, print_child, print_ctnr, print_remote; pcmk__assert(replica != NULL); if (pcmk__rsc_filtered_by_node(container, only_node)) { continue; } print_ip = (ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, print_everything); print_child = (child != NULL) && !child->priv->fns->is_filtered(child, only_rsc, print_everything); print_ctnr = !container->priv->fns->is_filtered(container, only_rsc, print_everything); print_remote = (remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? 
")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset); } if (print_ip) { out->message(out, (const char *) ip->priv->xml->name, new_show_opts, ip, only_node, only_rsc); } if (print_child) { out->message(out, (const char *) child->priv->xml->name, new_show_opts, child, only_node, only_rsc); } if (print_ctnr) { out->message(out, (const char *) container->priv->xml->name, new_show_opts, container, only_node, only_rsc); } if (print_remote) { out->message(out, (const char *) remote->priv->xml->name, new_show_opts, remote, only_node, only_rsc); } if (pcmk__list_of_multiple(bundle_data->replicas)) { out->end_list(out); } } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_html(out, replica, pcmk__current_node(container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void pe__bundle_replica_output_text(pcmk__output_t *out, pcmk__bundle_replica_t *replica, pcmk_node_t *node, uint32_t show_opts) { const pcmk_resource_t *rsc = replica->child; int offset = 0; char buffer[LINE_MAX]; if(rsc == NULL) { rsc = replica->container; } if (replica->remote) { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->remote)); } else { offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(replica->container)); } if (replica->ipaddr) { offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", replica->ipaddr); } pe__common_output_text(out, rsc, buffer, node, show_opts); } PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *", "GList *") int pe__bundle_text(pcmk__output_t *out, va_list args) { uint32_t show_opts = va_arg(args, uint32_t); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); const char *desc = NULL; pe__bundle_variant_data_t *bundle_data = NULL; int rc = pcmk_rc_no_output; bool print_everything = true; desc = pe__resource_description(rsc, show_opts); pcmk__assert(rsc != NULL); get_bundle_variant_data(bundle_data, rsc); if (rsc->priv->fns->is_filtered(rsc, only_rsc, true)) { return rc; } print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; gboolean print_ip, print_child, print_ctnr, print_remote; pcmk__assert(replica != NULL); if (pcmk__rsc_filtered_by_node(container, only_node)) { continue; } print_ip = (ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, print_everything); print_child = (child != NULL) && !child->priv->fns->is_filtered(child, only_rsc, print_everything); print_ctnr = !container->priv->fns->is_filtered(container, only_rsc, print_everything); print_remote = (remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, print_everything); if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) || 
(print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) { /* The text output messages used below require pe_print_implicit to * be set to do anything. */ uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs; PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); if (pcmk__list_of_multiple(bundle_data->replicas)) { out->list_item(out, NULL, "Replica[%d]", replica->offset); } out->begin_list(out, NULL, NULL, NULL); if (print_ip) { out->message(out, (const char *) ip->priv->xml->name, new_show_opts, ip, only_node, only_rsc); } if (print_child) { out->message(out, (const char *) child->priv->xml->name, new_show_opts, child, only_node, only_rsc); } if (print_ctnr) { out->message(out, (const char *) container->priv->xml->name, new_show_opts, container, only_node, only_rsc); } if (print_remote) { out->message(out, (const char *) remote->priv->xml->name, new_show_opts, remote, only_node, only_rsc); } out->end_list(out); } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) { continue; } else { PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s", (bundle_data->nreplicas > 1)? " set" : "", rsc->id, bundle_data->image, pcmk_is_set(rsc->flags, pcmk__rsc_unique)? " (unique)" : "", desc ? " (" : "", desc ? desc : "", desc ? ")" : "", get_unmanaged_str(rsc)); pe__bundle_replica_output_text(out, replica, pcmk__current_node(container), show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void free_bundle_replica(pcmk__bundle_replica_t *replica) { if (replica == NULL) { return; } pcmk__free_node_copy(replica->node); replica->node = NULL; if (replica->ip) { pcmk__xml_free(replica->ip->priv->xml); replica->ip->priv->xml = NULL; pcmk__free_resource(replica->ip); } if (replica->container) { pcmk__xml_free(replica->container->priv->xml); replica->container->priv->xml = NULL; pcmk__free_resource(replica->container); } if (replica->remote) { pcmk__xml_free(replica->remote->priv->xml); replica->remote->priv->xml = NULL; pcmk__free_resource(replica->remote); } free(replica->ipaddr); free(replica); } void pe__free_bundle(pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; CRM_CHECK(rsc != NULL, return); get_bundle_variant_data(bundle_data, rsc); pcmk__rsc_trace(rsc, "Freeing %s", rsc->id); free(bundle_data->prefix); free(bundle_data->image); free(bundle_data->control_port); free(bundle_data->host_network); free(bundle_data->host_netmask); free(bundle_data->ip_range_start); free(bundle_data->container_network); free(bundle_data->launcher_options); free(bundle_data->container_command); g_free(bundle_data->container_host_options); g_list_free_full(bundle_data->replicas, (GDestroyNotify) free_bundle_replica); g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free); g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free); g_list_free(rsc->priv->children); if(bundle_data->child) { pcmk__xml_free(bundle_data->child->priv->xml); bundle_data->child->priv->xml = NULL; pcmk__free_resource(bundle_data->child); } common_free(rsc); } enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc, bool current) { enum rsc_role_e container_role = pcmk_role_unknown; return container_role; } /*! 
* \brief Get the number of configured replicas in a bundle * * \param[in] rsc Bundle resource * * \return Number of configured replicas, or 0 on error */ int pe_bundle_replicas(const pcmk_resource_t *rsc) { if (pcmk__is_bundle(rsc)) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); return bundle_data->nreplicas; } return 0; } void pe__count_bundle(pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); for (GList *item = bundle_data->replicas; item != NULL; item = item->next) { pcmk__bundle_replica_t *replica = item->data; if (replica->ip) { replica->ip->priv->fns->count(replica->ip); } if (replica->child) { replica->child->priv->fns->count(replica->child); } if (replica->container) { replica->container->priv->fns->count(replica->container); } if (replica->remote) { replica->remote->priv->fns->count(replica->remote); } } } bool pe__bundle_is_filtered(const pcmk_resource_t *rsc, const GList *only_rsc, bool check_parent) { bool passes = false; pe__bundle_variant_data_t *bundle_data = NULL; if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) { passes = true; } else { get_bundle_variant_data(bundle_data, rsc); for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) { pcmk__bundle_replica_t *replica = gIter->data; pcmk_resource_t *ip = replica->ip; pcmk_resource_t *child = replica->child; pcmk_resource_t *container = replica->container; pcmk_resource_t *remote = replica->remote; if ((ip != NULL) && !ip->priv->fns->is_filtered(ip, only_rsc, false)) { passes = true; break; } if ((child != NULL) && !child->priv->fns->is_filtered(child, only_rsc, false)) { passes = true; break; } if (!container->priv->fns->is_filtered(container, only_rsc, false)) { passes = true; break; } if ((remote != NULL) && !remote->priv->fns->is_filtered(remote, only_rsc, false)) { passes = true; break; } } } return !passes; } /*! * \internal * \brief Get a list of a bundle's containers * * \param[in] bundle Bundle resource * * \return Newly created list of \p bundle's containers * \note It is the caller's responsibility to free the result with * g_list_free(). */ GList * pe__bundle_containers(const pcmk_resource_t *bundle) { /* @TODO It would be more efficient to do this once when unpacking the * bundle, creating a new GList* in the variant data */ GList *containers = NULL; const pe__bundle_variant_data_t *data = NULL; get_bundle_variant_data(data, bundle); for (GList *iter = data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; containers = g_list_append(containers, replica->container); } return containers; } // Bundle implementation of pcmk__rsc_methods_t:active_node() pcmk_node_t * pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean) { pcmk_node_t *active = NULL; pcmk_node_t *node = NULL; pcmk_resource_t *container = NULL; GList *containers = NULL; GList *iter = NULL; GHashTable *nodes = NULL; const pe__bundle_variant_data_t *data = NULL; if (count_all != NULL) { *count_all = 0; } if (count_clean != NULL) { *count_clean = 0; } if (rsc == NULL) { return NULL; } /* For the purposes of this method, we only care about where the bundle's * containers are active, so build a list of active containers. 
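* (Replicas whose container has no active node are skipped when building that list.)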
*/ get_bundle_variant_data(data, rsc); for (iter = data->replicas; iter != NULL; iter = iter->next) { pcmk__bundle_replica_t *replica = iter->data; if (replica->container->priv->active_nodes != NULL) { containers = g_list_append(containers, replica->container); } } if (containers == NULL) { return NULL; } /* If the bundle has only a single active container, just use that * container's method. If live migration is ever supported for bundle * containers, this will allow us to prefer the migration source when there * is only one container and it is migrating. For now, this just lets us * avoid creating the nodes table. */ if (pcmk__list_of_1(containers)) { container = containers->data; node = container->priv->fns->active_node(container, count_all, count_clean); g_list_free(containers); return node; } // Add all containers' active nodes to a hash table (for uniqueness) nodes = g_hash_table_new(NULL, NULL); for (iter = containers; iter != NULL; iter = iter->next) { container = iter->data; for (GList *node_iter = container->priv->active_nodes; node_iter != NULL; node_iter = node_iter->next) { node = node_iter->data; // If insert returns true, we haven't counted this node yet if (g_hash_table_insert(nodes, (gpointer) node->details, (gpointer) node) && !pe__count_active_node(rsc, node, &active, count_all, count_clean)) { goto done; } } } done: g_list_free(containers); g_hash_table_destroy(nodes); return active; } /*! * \internal * \brief Get maximum bundle resource instances per node * * \param[in] rsc Bundle resource to check * * \return Maximum number of \p rsc instances that can be active on one node */ unsigned int pe__bundle_max_per_node(const pcmk_resource_t *rsc) { pe__bundle_variant_data_t *bundle_data = NULL; get_bundle_variant_data(bundle_data, rsc); pcmk__assert(bundle_data->nreplicas_per_host >= 0); return (unsigned int) bundle_data->nreplicas_per_host; } diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index 5506a95a9a..8634399c77 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -1,2580 +1,2580 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include // bool, true, false #include #include #include #include // xmlNode #include // xmlXPathObject, etc. #include #include #include #include #include /*! * \internal * \brief Resource with list of node info objects for its active nodes */ struct rsc_node_info { const pcmk_resource_t *rsc; //!< Resource //! Nodes where \c rsc is active (list of node_info_t *) GList *list; }; /*! * \internal * \brief Prepend a given node to a resource node info object's list * * \param[in] data Node to prepend (const pcmk_node_t *) * \param[in,out] user_data Resource node info object whose list to prepend to * (struct rsc_node_info * * * \note This is suitable for use with \c g_list_foreach(). 
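* For example, cli_resource_search() below calls g_list_foreach(rsc->priv->active_nodes, prepend_node_info, &rni).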
*/ static void prepend_node_info(gpointer data, gpointer user_data) { const pcmk_node_t *node = data; struct rsc_node_info *rni = user_data; node_info_t *ni = NULL; pcmk__assert(rni->rsc != NULL); ni = pcmk__assert_alloc(1, sizeof(node_info_t)); ni->node_name = node->priv->name; ni->promoted = pcmk_is_set(rni->rsc->flags, pcmk__rsc_promotable) && (rni->rsc->priv->fns->state(rni->rsc, true) == pcmk_role_promoted); rni->list = g_list_prepend(rni->list, ni); } GList * cli_resource_search(const pcmk_resource_t *rsc, const char *requested_name) { const pcmk_resource_t *clone = NULL; struct rsc_node_info rni = { .rsc = rsc, .list = NULL, }; pcmk__assert(rsc != NULL); if (pcmk__is_clone(rsc)) { clone = rsc; } else { const pcmk_resource_t *parent = pe__const_top_resource(rsc, false); if (pcmk__is_clone(parent) && !pcmk_is_set(rsc->flags, pcmk__rsc_unique) && (rsc->priv->history_id != NULL) && pcmk__str_eq(requested_name, rsc->priv->history_id, pcmk__str_none) && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_none)) { // The anonymous clone children's common ID is supplied clone = parent; } } if (clone == NULL) { g_list_foreach(rsc->priv->active_nodes, prepend_node_info, &rni); return rni.list; } for (const GList *iter = clone->priv->children; iter != NULL; iter = iter->next) { const pcmk_resource_t *child = iter->data; rni.rsc = child; g_list_foreach(child->priv->active_nodes, prepend_node_info, &rni); } return rni.list; } // \return Standard Pacemaker return code static int find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr, const char *rsc, const char *attr_set_type, const char *set_name, const char *attr_id, const char *attr_name, xmlNode **result) { xmlNode *xml_search; int rc = pcmk_rc_ok; GString *xpath = NULL; const char *xpath_base = NULL; if (result) { *result = NULL; } if(the_cib == NULL) { return ENOTCONN; } xpath_base = pcmk_cib_xpath_for(PCMK_XE_RESOURCES); if (xpath_base == NULL) { crm_err(PCMK_XE_RESOURCES " CIB element not known (bug?)"); return ENOMSG; } xpath = g_string_sized_new(1024); pcmk__g_strcat(xpath, xpath_base, "//*[@" PCMK_XA_ID "=\"", rsc, "\"]", NULL); if (attr_set_type != NULL) { pcmk__g_strcat(xpath, "/", attr_set_type, NULL); if (set_name != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "=\"", set_name, "\"]", NULL); } } g_string_append(xpath, "//" PCMK_XE_NVPAIR); if (attr_id != NULL && attr_name!= NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "' " "and @" PCMK_XA_NAME "='", attr_name, "']", NULL); } else if (attr_id != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_ID "='", attr_id, "']", NULL); } else if (attr_name != NULL) { pcmk__g_strcat(xpath, "[@" PCMK_XA_NAME "='", attr_name, "']", NULL); } rc = the_cib->cmds->query(the_cib, xpath->str, &xml_search, cib_sync_call|cib_xpath); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { crm_log_xml_debug(xml_search, "Match"); if (xml_search->children != NULL) { rc = ENOTUNIQ; pcmk__warn_multiple_name_matches(out, xml_search, attr_name); out->spacer(out); } } if (result) { *result = xml_search; } else { pcmk__xml_free(xml_search); } g_string_free(xpath, TRUE); return rc; } /* PRIVATE. Use the find_matching_attr_resources instead. 
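* It recurses through children first; the root is always appended last (the depth == 0 case), while descendants are added only if they already define the attribute (rc == pcmk_rc_ok).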
*/ static void find_matching_attr_resources_recursive(pcmk__output_t *out, GList /* */ **result, pcmk_resource_t *rsc, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, int depth) { int rc = pcmk_rc_ok; char *lookup_id = clone_strip(rsc->id); for (GList *gIter = rsc->priv->children; gIter != NULL; gIter = gIter->next) { find_matching_attr_resources_recursive(out, result, (pcmk_resource_t *) gIter->data, attr_set, attr_set_type, attr_id, attr_name, cib, depth+1); /* do it only once for clones */ if (pcmk__is_clone(rsc)) { break; } } rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, NULL); /* Post-order traversal. * The root is always on the list and it is the last item. */ if((0 == depth) || (pcmk_rc_ok == rc)) { /* push the head */ *result = g_list_append(*result, rsc); } free(lookup_id); } /* The result is a linearized pre-ordered tree of resources. */ static GList/**/ * find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc, const char * rsc_id, const char * attr_set, const char * attr_set_type, const char * attr_id, const char * attr_name, cib_t * cib, const char * cmd, gboolean force) { int rc = pcmk_rc_ok; char *lookup_id = NULL; GList * result = NULL; /* If --force is used, update only the requested resource (clone or primitive). * Otherwise, if the primitive has the attribute, use that. * Otherwise use the clone. */ if(force == TRUE) { return g_list_append(result, rsc); } if (pcmk__is_clone(rsc->priv->parent)) { int rc = find_resource_attr(out, cib, PCMK_XA_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, NULL); if(rc != pcmk_rc_ok) { rsc = rsc->priv->parent; out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'", cmd, attr_name, rsc->id, rsc_id); } return g_list_append(result, rsc); } else if ((rsc->priv->parent == NULL) && (rsc->priv->children != NULL) && pcmk__is_clone(rsc)) { pcmk_resource_t *child = rsc->priv->children->data; if (pcmk__is_primitive(child)) { lookup_id = clone_strip(child->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, NULL); if(rc == pcmk_rc_ok) { rsc = child; out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'", attr_name, lookup_id, cmd, rsc_id); } free(lookup_id); } return g_list_append(result, rsc); } /* If the resource is a group ==> children inherit the attribute if defined. */ find_matching_attr_resources_recursive(out, &result, rsc, attr_set, attr_set_type, attr_id, attr_name, cib, 0); return result; } /*! 
* \internal * \brief Get a resource's XML by resource ID from a given CIB XML tree * * \param[in] cib_xml CIB XML to search * \param[in] rsc Resource whose XML to get * * \return Subtree of \p cib_xml belonging to \p rsc, or \c NULL if not found */ static xmlNode * get_cib_rsc(xmlNode *cib_xml, const pcmk_resource_t *rsc) { char *xpath = crm_strdup_printf("%s//*[@" PCMK_XA_ID "='%s']", pcmk_cib_xpath_for(PCMK_XE_RESOURCES), pcmk__xe_id(rsc->priv->xml)); xmlNode *rsc_xml = pcmk__xpath_find_one(cib_xml->doc, xpath, LOG_ERR); free(xpath); return rsc_xml; } static int update_element_attribute(pcmk__output_t *out, pcmk_resource_t *rsc, cib_t *cib, xmlNode *cib_xml_orig, const char *attr_name, const char *attr_value) { int rc = pcmk_rc_ok; xmlNode *rsc_xml = rsc->priv->xml; rsc_xml = get_cib_rsc(cib_xml_orig, rsc); if (rsc_xml == NULL) { return ENXIO; } crm_xml_add(rsc_xml, attr_name, attr_value); rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Set attribute: " PCMK_XA_NAME "=%s value=%s", attr_name, attr_value); } return rc; } static int resources_with_attr(pcmk__output_t *out, cib_t *cib, pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *top_id, gboolean force, GList **resources) { if (pcmk__str_eq(attr_set_type, PCMK_XE_INSTANCE_ATTRIBUTES, pcmk__str_casei)) { if (!force) { xmlNode *xml_search = NULL; int rc = pcmk_rc_ok; rc = find_resource_attr(out, cib, PCMK_XA_ID, top_id, PCMK_XE_META_ATTRIBUTES, attr_set, attr_id, attr_name, &xml_search); if (rc == pcmk_rc_ok || rc == ENOTUNIQ) { char *found_attr_id = NULL; - found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); + found_attr_id = pcmk__xe_get_copy(xml_search, PCMK_XA_ID); if (!out->is_quiet(out)) { out->err(out, "WARNING: There is already a meta attribute " "for '%s' called '%s' (id=%s)", top_id, attr_name, found_attr_id); out->err(out, " Delete '%s' first or use the force option " "to override", found_attr_id); } free(found_attr_id); pcmk__xml_free(xml_search); return ENOTUNIQ; } pcmk__xml_free(xml_search); } *resources = g_list_append(*resources, rsc); } else { *resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "update", force); } /* If the user specified attr_set or attr_id, the intent is to modify a * single resource, which will be the last item in the list. 
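* (The code below therefore keeps only the last list entry and frees the rest.)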
*/ if ((attr_set != NULL) || (attr_id != NULL)) { GList *last = g_list_last(*resources); *resources = g_list_remove_link(*resources, last); g_list_free(*resources); *resources = last; } return pcmk_rc_ok; } static void free_attr_update_data(gpointer data) { attr_update_data_t *ud = data; if (ud == NULL) { return; } free(ud->attr_set_type); free(ud->attr_set_id); free(ud->attr_name); free(ud->attr_value); free(ud->given_rsc_id); free(ud->found_attr_id); free(ud); } static int update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, xmlNode *cib_xml_orig, gboolean force, GList **results) { pcmk__output_t *out = rsc->priv->scheduler->priv->out; int rc = pcmk_rc_ok; GList/**/ *resources = NULL; const char *top_id = pe__const_top_resource(rsc, false)->id; if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, PCMK_XA_ID, top_id, NULL, NULL, NULL, attr_name, NULL); } rc = resources_with_attr(out, cib, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, top_id, force, &resources); if (rc != pcmk_rc_ok) { return rc; } for (GList *iter = resources; iter != NULL; iter = iter->next) { // @TODO Functionize loop body to simplify freeing allocated memory char *lookup_id = NULL; char *local_attr_set = NULL; char *found_attr_id = NULL; const char *rsc_attr_id = attr_id; const char *rsc_attr_set = attr_set; xmlNode *rsc_xml = rsc->priv->xml; xmlNode *xml_top = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_search = NULL; rsc = (pcmk_resource_t *) iter->data; lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &xml_search); switch (rc) { case pcmk_rc_ok: - found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); + found_attr_id = pcmk__xe_get_copy(xml_search, PCMK_XA_ID); crm_debug("Found a match for " PCMK_XA_NAME "='%s': " PCMK_XA_ID "='%s'", attr_name, found_attr_id); rsc_attr_id = found_attr_id; break; case ENXIO: if (rsc_attr_set == NULL) { local_attr_set = crm_strdup_printf("%s-%s", lookup_id, attr_set_type); rsc_attr_set = local_attr_set; } if (rsc_attr_id == NULL) { found_attr_id = crm_strdup_printf("%s-%s", rsc_attr_set, attr_name); rsc_attr_id = found_attr_id; } rsc_xml = get_cib_rsc(cib_xml_orig, rsc); if (rsc_xml == NULL) { /* @TODO Warn and continue through the rest of the resources * and return the error at the end? This should never * happen, but if it does, then we could have a partial * update. 
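* (As written, we free the local allocations and return ENXIO immediately.)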
*/ free(lookup_id); free(found_attr_id); pcmk__xml_free(xml_search); g_list_free(resources); return ENXIO; } xml_top = pcmk__xe_create(NULL, (const char *) rsc_xml->name); crm_xml_add(xml_top, PCMK_XA_ID, lookup_id); xml_obj = pcmk__xe_create(xml_top, attr_set_type); crm_xml_add(xml_obj, PCMK_XA_ID, rsc_attr_set); break; default: free(lookup_id); free(found_attr_id); pcmk__xml_free(xml_search); g_list_free(resources); return rc; } xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name, attr_value); if (xml_top == NULL) { xml_top = xml_obj; } crm_log_xml_debug(xml_top, "Update"); rc = cib->cmds->modify(cib, PCMK_XE_RESOURCES, xml_top, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { attr_update_data_t *ud = pcmk__assert_alloc(1, sizeof(attr_update_data_t)); if (attr_set_type == NULL) { attr_set_type = (const char *) xml_search->parent->name; } if (rsc_attr_set == NULL) { rsc_attr_set = pcmk__xe_get(xml_search->parent, PCMK_XA_ID); } ud->attr_set_type = pcmk__str_copy(attr_set_type); ud->attr_set_id = pcmk__str_copy(rsc_attr_set); ud->attr_name = pcmk__str_copy(attr_name); ud->attr_value = pcmk__str_copy(attr_value); ud->given_rsc_id = pcmk__str_copy(lookup_id); ud->found_attr_id = pcmk__str_copy(found_attr_id); ud->rsc = rsc; *results = g_list_append(*results, ud); } pcmk__xml_free(xml_top); pcmk__xml_free(xml_search); free(lookup_id); free(found_attr_id); free(local_attr_set); if (recursive && pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_casei)) { /* We want to set the attribute only on resources explicitly * colocated with this one, so we use * rsc->priv->with_this_colocations directly rather than the * with_this_colocations() method. */ pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop); for (GList *lpc = rsc->priv->with_this_colocations; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; crm_debug("Checking %s %d", cons->id, cons->score); if (pcmk_is_set(cons->dependent->flags, pcmk__rsc_detect_loop) || (cons->score <= 0)) { continue; } crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, cons->dependent->id); update_attribute(cons->dependent, cons->dependent->id, NULL, attr_set_type, NULL, attr_name, attr_value, recursive, cib, cib_xml_orig, force, results); } } } g_list_free(resources); return rc; } // \return Standard Pacemaker return code int cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, const char *attr_value, gboolean recursive, cib_t *cib, xmlNode *cib_xml_orig, gboolean force) { static bool need_init = true; int rc = pcmk_rc_ok; GList *results = NULL; pcmk__output_t *out = rsc->priv->scheduler->priv->out; pcmk__assert(cib_xml_orig != NULL); /* If we were asked to update the attribute in a resource element (for * instance, ) there's really not much we need to do. 
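     * (Commentary drawn from the code below: in that case
     * update_element_attribute() sets the attribute directly on the
     * resource's element as found in cib_xml_orig and pushes the result back
     * with a single synchronous CIB replace call.)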
*/ if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { return update_element_attribute(out, rsc, cib, cib_xml_orig, attr_name, attr_value); } /* One time initialization - clear flags so we can detect loops */ if (need_init) { need_init = false; pcmk__unpack_constraints(rsc->priv->scheduler); pe__clear_resource_flags_on_all(rsc->priv->scheduler, pcmk__rsc_detect_loop); } rc = update_attribute(rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, attr_value, recursive, cib, cib_xml_orig, force, &results); if (rc == pcmk_rc_ok) { if (results == NULL) { return rc; } out->message(out, "attribute-changed-list", results); g_list_free_full(results, free_attr_update_data); } return rc; } // \return Standard Pacemaker return code int cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name, const char *attr_set, const char *attr_set_type, const char *attr_id, const char *attr_name, cib_t *cib, xmlNode *cib_xml_orig, gboolean force) { pcmk__output_t *out = rsc->priv->scheduler->priv->out; int rc = pcmk_rc_ok; GList/**/ *resources = NULL; pcmk__assert((cib != NULL) && (cib_xml_orig != NULL)); if ((attr_id == NULL) && !force) { find_resource_attr(out, cib, PCMK_XA_ID, pe__const_top_resource(rsc, false)->id, NULL, NULL, NULL, attr_name, NULL); } if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) { xmlNode *rsc_xml = rsc->priv->xml; rsc_xml = get_cib_rsc(cib_xml_orig, rsc); if (rsc_xml == NULL) { return ENXIO; } pcmk__xe_remove_attr(rsc_xml, attr_name); rc = cib->cmds->replace(cib, PCMK_XE_RESOURCES, rsc_xml, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Deleted attribute: %s", attr_name); } return rc; } if (pcmk__str_eq(attr_set_type, PCMK_XE_META_ATTRIBUTES, pcmk__str_none)) { resources = find_matching_attr_resources(out, rsc, requested_name, attr_set, attr_set_type, attr_id, attr_name, cib, "delete", force); } else { resources = g_list_append(resources, rsc); } for (GList *iter = resources; iter != NULL; iter = iter->next) { char *lookup_id = NULL; xmlNode *xml_obj = NULL; xmlNode *xml_search = NULL; char *found_attr_id = NULL; const char *rsc_attr_id = attr_id; rsc = (pcmk_resource_t *) iter->data; /* @TODO Search the original CIB in find_resource_attr() for * future-proofing, to ensure that we're getting IDs of nvpairs that * exist in the CIB. */ lookup_id = clone_strip(rsc->id); rc = find_resource_attr(out, cib, PCMK_XA_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &xml_search); switch (rc) { case pcmk_rc_ok: - found_attr_id = crm_element_value_copy(xml_search, PCMK_XA_ID); + found_attr_id = pcmk__xe_get_copy(xml_search, PCMK_XA_ID); pcmk__xml_free(xml_search); break; case ENXIO: free(lookup_id); pcmk__xml_free(xml_search); continue; default: free(lookup_id); pcmk__xml_free(xml_search); g_list_free(resources); return rc; } if (rsc_attr_id == NULL) { rsc_attr_id = found_attr_id; } xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL); crm_log_xml_debug(xml_obj, "Delete"); rc = cib->cmds->remove(cib, PCMK_XE_RESOURCES, xml_obj, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc == pcmk_rc_ok) { out->info(out, "Deleted '%s' option: " PCMK_XA_ID "=%s%s%s%s%s", lookup_id, found_attr_id, ((attr_set == NULL)? "" : " set="), pcmk__s(attr_set, ""), ((attr_name == NULL)? 
"" : " " PCMK_XA_NAME "="), pcmk__s(attr_name, "")); } free(lookup_id); pcmk__xml_free(xml_obj); free(found_attr_id); } g_list_free(resources); return rc; } // \return Standard Pacemaker return code static int send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, pcmk_resource_t *rsc, const char *rsc_id, const pcmk_node_t *node) { pcmk__output_t *out = NULL; const char *rsc_api_id = NULL; const char *rsc_long_id = NULL; const char *rsc_class = NULL; const char *rsc_provider = NULL; const char *rsc_type = NULL; const char *router_node = NULL; bool cib_only = false; pcmk__assert((rsc != NULL) && (rsc_id != NULL) && (node != NULL)); out = rsc->priv->scheduler->priv->out; if (!pcmk__is_primitive(rsc)) { out->err(out, "We can only process primitive resources, not %s", rsc_id); return EINVAL; } rsc_class = pcmk__xe_get(rsc->priv->xml, PCMK_XA_CLASS); rsc_provider = pcmk__xe_get(rsc->priv->xml, PCMK_XA_PROVIDER); rsc_type = pcmk__xe_get(rsc->priv->xml, PCMK_XA_TYPE); if ((rsc_class == NULL) || (rsc_type == NULL)) { out->err(out, "Resource %s does not have a class and type", rsc_id); return EINVAL; } router_node = node->priv->name; if (!node->details->online) { if (do_fail_resource) { out->err(out, "Node %s is not online", pcmk__node_name(node)); return ENOTCONN; } cib_only = true; } else if (pcmk__is_pacemaker_remote_node(node)) { const pcmk_node_t *conn_host = pcmk__current_node(node->priv->remote); if (conn_host == NULL) { out->err(out, "No cluster connection to Pacemaker Remote node %s " "detected", pcmk__node_name(node)); return ENOTCONN; } router_node = conn_host->priv->name; } if (rsc->priv->history_id != NULL) { rsc_api_id = rsc->priv->history_id; rsc_long_id = rsc->id; } else { rsc_api_id = rsc->id; } if (do_fail_resource) { return pcmk_controld_api_fail(controld_api, node->priv->name, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type); } return pcmk_controld_api_refresh(controld_api, node->priv->name, router_node, rsc_api_id, rsc_long_id, rsc_class, rsc_provider, rsc_type, cib_only); } /*! * \internal * \brief Get resource name as used in failure-related node attributes * * \param[in] rsc Resource to check * * \return Newly allocated string containing resource's fail name * \note The caller is responsible for freeing the result. */ static inline char * rsc_fail_name(const pcmk_resource_t *rsc) { const char *name = pcmk__s(rsc->priv->history_id, rsc->id); if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) { return strdup(name); } return clone_strip(name); } // \return Standard Pacemaker return code static int clear_rsc_history(pcmk_ipc_api_t *controld_api, pcmk_resource_t *rsc, const char *rsc_id, const pcmk_node_t *node) { int rc = pcmk_rc_ok; pcmk__assert((rsc != NULL) && (node != NULL)); /* Erase the resource's entire LRM history in the CIB, even if we're only * clearing a single operation's fail count. If we erased only entries for a * single operation, we might wind up with a wrong idea of the current * resource state, and we might not re-probe the resource. 
*/ rc = send_lrm_rsc_op(controld_api, false, rsc, rsc_id, node); if (rc != pcmk_rc_ok) { return rc; } crm_trace("Processing %d mainloop inputs", pcmk_controld_api_replies_expected(controld_api)); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d still remaining", pcmk_controld_api_replies_expected(controld_api)); } return rc; } // \return Standard Pacemaker return code static int clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api, pcmk_node_t *node, const char *rsc_id, const char *operation, const char *interval_spec) { int rc = pcmk_rc_ok; pcmk_scheduler_t *scheduler = NULL; const char *failed_value = NULL; const char *failed_id = NULL; char *interval_ms_s = NULL; GHashTable *rscs = NULL; GHashTableIter iter; pcmk__assert(node != NULL); scheduler = node->priv->scheduler; /* Create a hash table to use as a set of resources to clean. This lets us * clean each resource only once (per node) regardless of how many failed * operations it has. */ rscs = pcmk__strkey_table(NULL, NULL); // Normalize interval to milliseconds for comparison to history entry if (operation) { guint interval_ms = 0U; pcmk_parse_interval_spec(interval_spec, &interval_ms); interval_ms_s = crm_strdup_printf("%u", interval_ms); } for (xmlNode *xml_op = pcmk__xe_first_child(scheduler->priv->failed, NULL, NULL, NULL); xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) { failed_id = pcmk__xe_get(xml_op, PCMK__XA_RSC_ID); if (failed_id == NULL) { // Malformed history entry, should never happen continue; } // No resource specified means all resources match if (rsc_id) { pcmk_resource_t *fail_rsc = NULL; fail_rsc = pe_find_resource_with_flags(scheduler->priv->resources, failed_id, pcmk_rsc_match_history |pcmk_rsc_match_anon_basename); if ((fail_rsc == NULL) || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_none)) { continue; } } // Host name should always have been provided by this point failed_value = pcmk__xe_get(xml_op, PCMK_XA_UNAME); if (!pcmk__str_eq(node->priv->name, failed_value, pcmk__str_casei)) { continue; } // No operation specified means all operations match if (operation) { failed_value = pcmk__xe_get(xml_op, PCMK_XA_OPERATION); if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) { continue; } // Interval (if operation was specified) defaults to 0 (not all) failed_value = pcmk__xe_get(xml_op, PCMK_META_INTERVAL); if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) { continue; } } g_hash_table_add(rscs, (gpointer) failed_id); } free(interval_ms_s); g_hash_table_iter_init(&iter, rscs); while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) { pcmk_resource_t *rsc = NULL; crm_debug("Erasing failures of %s on %s", failed_id, pcmk__node_name(node)); rsc = pe_find_resource(scheduler->priv->resources, failed_id); if (rsc == NULL) { out->err(out, "Resource %s not found", failed_id); return ENXIO; } rc = clear_rsc_history(controld_api, rsc, failed_id, node); if (rc != pcmk_rc_ok) { return rc; } } g_hash_table_destroy(rscs); return rc; } // \return Standard Pacemaker return code static int clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation, const char *interval_spec, const pcmk_node_t *node) { int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; char *rsc_name = rsc_fail_name(rsc); if (pcmk__is_pacemaker_remote_node(node)) { attr_options |= pcmk__node_attr_remote; } rc = pcmk__attrd_api_clear_failures(NULL, node->priv->name, rsc_name, operation, interval_spec, NULL, attr_options); free(rsc_name); return 
rc; } // \return Standard Pacemaker return code int cli_resource_delete(pcmk_ipc_api_t *controld_api, pcmk_resource_t *rsc, pcmk_node_t *node, const char *operation, const char *interval_spec, bool just_failures, bool force) { pcmk_scheduler_t *scheduler = NULL; pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; pcmk__assert(rsc != NULL); scheduler = rsc->priv->scheduler; out = scheduler->priv->out; if (rsc->priv->children != NULL) { for (GList *iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = iter->data; rc = cli_resource_delete(controld_api, child, node, operation, interval_spec, just_failures, force); if (rc != pcmk_rc_ok) { return rc; } } return pcmk_rc_ok; } if (node == NULL) { GList *nodes = g_hash_table_get_values(rsc->priv->probed_nodes); if (nodes == NULL) { if (force) { nodes = g_list_copy(scheduler->nodes); } else if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)) { GHashTableIter iter; g_hash_table_iter_init(&iter, rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if ((node != NULL) && (node->assign->score >= 0)) { nodes = g_list_prepend(nodes, (gpointer *) node); } } } else { nodes = g_hash_table_get_values(rsc->priv->allowed_nodes); } } for (GList *iter = nodes; iter != NULL; iter = iter->next) { node = (pcmk_node_t *) iter->data; if (!node->details->online) { continue; } rc = cli_resource_delete(controld_api, rsc, node, operation, interval_spec, just_failures, force); if (rc != pcmk_rc_ok) { break; } } g_list_free(nodes); return rc; } if (!pcmk_is_set(node->priv->flags, pcmk__node_probes_allowed)) { out->err(out, "Unable to clean up %s because resource discovery disabled on " "%s", rsc->id, pcmk__node_name(node)); return EOPNOTSUPP; } if (controld_api == NULL) { out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file", rsc->id, pcmk__node_name(node)); return pcmk_rc_ok; } rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up %s failures on %s: %s", rsc->id, pcmk__node_name(node), pcmk_rc_str(rc)); return rc; } if (just_failures) { rc = clear_rsc_failures(out, controld_api, node, rsc->id, operation, interval_spec); } else { rc = clear_rsc_history(controld_api, rsc, rsc->id, node); } if (rc != pcmk_rc_ok) { out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s", rsc->id, pcmk__node_name(node), pcmk_rc_str(rc)); } else { out->info(out, "Cleaned up %s on %s", rsc->id, pcmk__node_name(node)); } return rc; } // \return Standard Pacemaker return code int cli_cleanup_all(pcmk_ipc_api_t *controld_api, pcmk_node_t *node, const char *operation, const char *interval_spec, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; int attr_options = pcmk__node_attr_none; const char *node_name = NULL; const char *log_node_name = "all nodes"; pcmk__assert(scheduler != NULL); out = scheduler->priv->out; if (node != NULL) { node_name = node->priv->name; log_node_name = pcmk__node_name(node); } if (controld_api == NULL) { out->info(out, "Dry run: skipping clean-up of %s due to CIB_file", log_node_name); return rc; } if (pcmk__is_pacemaker_remote_node(node)) { pcmk__set_node_attr_flags(attr_options, pcmk__node_attr_remote); } rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation, interval_spec, NULL, attr_options); if (rc != pcmk_rc_ok) { out->err(out, "Unable to clean up all failures on %s: %s", log_node_name, pcmk_rc_str(rc)); return rc; } if (node != NULL) { rc = 
clear_rsc_failures(out, controld_api, node, NULL, operation, interval_spec); } else { for (GList *iter = scheduler->nodes; iter; iter = iter->next) { pcmk_node_t *sched_node = iter->data; rc = clear_rsc_failures(out, controld_api, sched_node, NULL, operation, interval_spec); if (rc != pcmk_rc_ok) { break; } } } if (rc == pcmk_rc_ok) { out->info(out, "Cleaned up all resources on %s", log_node_name); } else { // @TODO But didn't clear_rsc_failures() fail? out->err(out, "Cleaned all resource failures on %s, but unable to clean " "history: %s", log_node_name, pcmk_rc_str(rc)); } return rc; } static void check_role(resource_checks_t *checks) { const char *role_s = g_hash_table_lookup(checks->rsc->priv->meta, PCMK_META_TARGET_ROLE); if (role_s == NULL) { return; } switch (pcmk_parse_role(role_s)) { case pcmk_role_stopped: checks->flags |= rsc_remain_stopped; break; case pcmk_role_unpromoted: if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags, pcmk__rsc_promotable)) { checks->flags |= rsc_unpromotable; } break; default: break; } } static void check_managed(resource_checks_t *checks) { const char *managed_s = g_hash_table_lookup(checks->rsc->priv->meta, PCMK_META_IS_MANAGED); if ((managed_s != NULL) && !crm_is_true(managed_s)) { checks->flags |= rsc_unmanaged; } } static void check_locked(resource_checks_t *checks) { const pcmk_node_t *lock_node = checks->rsc->priv->lock_node; if (lock_node != NULL) { checks->flags |= rsc_locked; checks->lock_node = lock_node->priv->name; } } static bool node_is_unhealthy(pcmk_node_t *node) { switch (pe__health_strategy(node->priv->scheduler)) { case pcmk__health_strategy_none: break; case pcmk__health_strategy_no_red: if (pe__node_health(node) < 0) { return true; } break; case pcmk__health_strategy_only_green: if (pe__node_health(node) <= 0) { return true; } break; case pcmk__health_strategy_progressive: case pcmk__health_strategy_custom: /* @TODO These are finite scores, possibly with rules, and possibly * combining with other scores, so attributing these as a cause is * nontrivial. 
*/ break; } return false; } static void check_node_health(resource_checks_t *checks, pcmk_node_t *node) { if (node == NULL) { GHashTableIter iter; bool allowed = false; bool all_nodes_unhealthy = true; g_hash_table_iter_init(&iter, checks->rsc->priv->allowed_nodes); while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) { allowed = true; if (!node_is_unhealthy(node)) { all_nodes_unhealthy = false; break; } } if (allowed && all_nodes_unhealthy) { checks->flags |= rsc_node_health; } } else if (node_is_unhealthy(node)) { checks->flags |= rsc_node_health; } } /* @TODO Make this check all resources if rsc is NULL, so it can be called after * cleanup of all resources */ int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node) { resource_checks_t checks = { .rsc = rsc }; check_role(&checks); check_managed(&checks); check_locked(&checks); check_node_health(&checks, node); return out->message(out, "resource-check-list", &checks); } // \return Standard Pacemaker return code int cli_resource_fail(pcmk_ipc_api_t *controld_api, pcmk_resource_t *rsc, const char *rsc_id, const pcmk_node_t *node) { pcmk__assert((rsc != NULL) && (rsc_id != NULL) && (node != NULL)); if (controld_api == NULL) { pcmk__output_t *out = rsc->priv->scheduler->priv->out; out->err(out, "Dry run: skipping fail of %s on %s due to CIB_file", rsc_id, pcmk__node_name(node)); return pcmk_rc_ok; } crm_notice("Failing %s on %s", rsc_id, pcmk__node_name(node)); return send_lrm_rsc_op(controld_api, true, rsc, rsc_id, node); } static GHashTable * generate_resource_params(pcmk_resource_t *rsc) { GHashTable *params = NULL; GHashTable *meta = NULL; GHashTable *combined = NULL; GHashTableIter iter; char *key = NULL; char *value = NULL; combined = pcmk__strkey_table(free, free); params = pe_rsc_params(rsc, NULL, rsc->priv->scheduler); if (params != NULL) { g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { pcmk__insert_dup(combined, key, value); } } meta = pcmk__strkey_table(free, free); get_meta_attributes(meta, rsc, NULL, rsc->priv->scheduler); if (meta != NULL) { g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); g_hash_table_insert(combined, crm_name, strdup(value)); } g_hash_table_destroy(meta); } return combined; } bool resource_is_running_on(pcmk_resource_t *rsc, const char *host) { bool found = true; GList *hIter = NULL; GList *hosts = NULL; if (rsc == NULL) { return false; } rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current); for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) { pcmk_node_t *node = (pcmk_node_t *) hIter->data; if (pcmk__strcase_any_of(host, node->priv->name, node->priv->id, NULL)) { crm_trace("Resource %s is running on %s\n", rsc->id, host); goto done; } } if (host != NULL) { crm_trace("Resource %s is not running on: %s\n", rsc->id, host); found = false; } else if(host == NULL && hosts == NULL) { crm_trace("Resource %s is not running\n", rsc->id); found = false; } done: g_list_free(hosts); return found; } /*! 
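 * \note Commentary drawn from the implementation: the returned list holds
 *       newly allocated copies of resource IDs, so callers such as
 *       cli_resource_restart() release it with g_list_free_full(list, free).
 *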
* \internal * \brief Create a list of all resources active on host from a given list * * \param[in] host Name of host to check whether resources are active * \param[in] rsc_list List of resources to check * * \return New list of resources from list that are active on host */ static GList * get_active_resources(const char *host, GList *rsc_list) { GList *rIter = NULL; GList *active = NULL; for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data; /* Expand groups to their members, because if we're restarting a member * other than the first, we can't otherwise tell which resources are * stopping and starting. */ if (pcmk__is_group(rsc)) { GList *member_active = NULL; member_active = get_active_resources(host, rsc->priv->children); active = g_list_concat(active, member_active); } else if (resource_is_running_on(rsc, host)) { active = g_list_append(active, strdup(rsc->id)); } } return active; } static void dump_list(GList *items, const char *tag) { int lpc = 0; GList *item = NULL; for (item = items; item != NULL; item = item->next) { crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data); lpc++; } } static void display_list(pcmk__output_t *out, GList *items, const char *tag) { GList *item = NULL; for (item = items; item != NULL; item = item->next) { out->info(out, "%s%s", tag, (const char *)item->data); } } /*! * \internal * \brief Update scheduler XML input based on a CIB query and the current time * * The CIB XML is upgraded to the latest schema version. * * \param[in,out] out Output object * \param[in,out] scheduler Scheduler data to update * \param[in] cib Connection to the CIB manager * \param[out] cib_xml_orig Where to store CIB XML before any schema * upgrades (can be \c NULL) * * \return Standard Pacemaker return code */ int update_scheduler_input(pcmk__output_t *out, pcmk_scheduler_t *scheduler, cib_t *cib, xmlNode **cib_xml_orig) { xmlNode *queried_xml = NULL; xmlNode *updated_xml = NULL; int rc = pcmk_rc_ok; pcmk__assert((out != NULL) && (scheduler != NULL) && (scheduler->input == NULL) && (scheduler->priv->now == NULL) && (cib != NULL) && ((cib_xml_orig == NULL) || (*cib_xml_orig == NULL))); rc = cib->cmds->query(cib, NULL, &queried_xml, cib_sync_call); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not obtain the current CIB: %s", pcmk_rc_str(rc)); goto done; } if (cib_xml_orig != NULL) { updated_xml = pcmk__xml_copy(NULL, queried_xml); } else { // No need to preserve the pre-upgrade CIB, so don't make a copy updated_xml = queried_xml; queried_xml = NULL; } rc = pcmk__update_configured_schema(&updated_xml, false); if (rc != pcmk_rc_ok) { out->err(out, "Could not upgrade the current CIB XML: %s", pcmk_rc_str(rc)); pcmk__xml_free(updated_xml); goto done; } scheduler->input = updated_xml; scheduler->priv->now = crm_time_new(NULL); done: if ((rc == pcmk_rc_ok) && (cib_xml_orig != NULL)) { *cib_xml_orig = queried_xml; } else { pcmk__xml_free(queried_xml); } return rc; } // \return Standard Pacemaker return code static int update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, xmlNode **cib_xml_orig, bool simulate) { char *pid = NULL; char *shadow_file = NULL; cib_t *shadow_cib = NULL; int rc = pcmk_rc_ok; pcmk__output_t *out = scheduler->priv->out; pcmk_reset_scheduler(scheduler); pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts); if(simulate) { bool prev_quiet = false; rc = update_scheduler_input(out, scheduler, cib, NULL); if (rc != pcmk_rc_ok) { goto done; } pid = pcmk__getpid_s(); shadow_cib 
= cib_shadow_new(pid); shadow_file = get_shadow_file(pid); if (shadow_cib == NULL) { out->err(out, "Could not create shadow cib: '%s'", pid); rc = ENXIO; goto done; } rc = pcmk__xml_write_file(scheduler->input, shadow_file, false); if (rc != pcmk_rc_ok) { out->err(out, "Could not populate shadow cib: %s", pcmk_rc_str(rc)); goto done; } rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { out->err(out, "Could not connect to shadow cib: %s", pcmk_rc_str(rc)); goto done; } pcmk__schedule_actions(scheduler); prev_quiet = out->is_quiet(out); out->quiet = true; pcmk__simulate_transition(scheduler, shadow_cib, NULL); out->quiet = prev_quiet; rc = update_dataset(shadow_cib, scheduler, cib_xml_orig, false); } else { xmlNode *xml = NULL; rc = update_scheduler_input(out, scheduler, cib, &xml); if (rc != pcmk_rc_ok) { goto done; } pcmk__xml_free(*cib_xml_orig); *cib_xml_orig = xml; cluster_status(scheduler); } done: // Do not free scheduler->input because rsc->priv->xml must remain valid cib_delete(shadow_cib); free(pid); if(shadow_file) { unlink(shadow_file); free(shadow_file); } return rc; } /*! * \internal * \brief Find the maximum stop timeout of a resource and its children (if any) * * \param[in,out] rsc Resource to get timeout for * * \return Maximum stop timeout for \p rsc (in milliseconds) */ static guint max_rsc_stop_timeout(pcmk_resource_t *rsc) { long long result_ll; guint max_delay = 0; xmlNode *config = NULL; GHashTable *meta = NULL; if (rsc == NULL) { return 0; } // If resource is collective, use maximum of its children's stop timeouts if (rsc->priv->children != NULL) { for (GList *iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = iter->data; guint delay = max_rsc_stop_timeout(child); if (delay > max_delay) { pcmk__rsc_trace(rsc, "Maximum stop timeout for %s is now %s " "due to %s", rsc->id, pcmk__readable_interval(delay), child->id); max_delay = delay; } } return max_delay; } // Get resource's stop action configuration from CIB config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true); /* Get configured timeout for stop action (fully evaluated for rules, * defaults, etc.). * * @TODO This currently ignores node (which might matter for rules) */ meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config); if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT), &result_ll, -1LL) == pcmk_rc_ok) && (result_ll >= 0)) { max_delay = (guint) QB_MIN(result_ll, UINT_MAX); } g_hash_table_destroy(meta); return max_delay; } /*! * \internal * \brief Find a reasonable waiting time for stopping any one resource in a list * * \param[in,out] scheduler Scheduler data * \param[in] resources List of names of resources that will be stopped * * \return Rough estimate of a reasonable time to wait (in seconds) to stop any * one resource in \p resources * \note This estimate is very rough, simply the maximum stop timeout of all * given resources and their children, plus a small fudge factor. It does * not account for children that must be stopped in sequence, action * throttling, or any demotions needed. It checks the stop timeout, even * if the resources in question are actually being started. 
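 *       (Commentary drawn from the implementation: concretely, the return
 *       value is the maximum stop timeout converted to seconds plus a
 *       five-second cushion.)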
*/ static guint wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources) { guint max_delay = 0U; // Find maximum stop timeout in milliseconds for (const GList *item = resources; item != NULL; item = item->next) { pcmk_resource_t *rsc = pe_find_resource(scheduler->priv->resources, (const char *) item->data); guint delay = max_rsc_stop_timeout(rsc); if (delay > max_delay) { pcmk__rsc_trace(rsc, "Wait time is now %s due to %s", pcmk__readable_interval(delay), rsc->id); max_delay = delay; } } return pcmk__timeout_ms2s(max_delay) + 5; } #define waiting_for_starts(d, r, h) ((d != NULL) || \ (!resource_is_running_on((r), (h)))) /*! * \internal * \brief Restart a resource (on a particular host if requested). * * \param[in,out] out Output object * \param[in,out] rsc The resource to restart * \param[in] node Node to restart resource on (NULL for all) * \param[in] move_lifetime If not NULL, how long constraint should * remain in effect (as ISO 8601 string) * \param[in] timeout_ms Consider failed if actions do not complete * in this time (specified in milliseconds, * but a two-second granularity is actually * used; if 0, it will be calculated based on * the resource timeout) * \param[in,out] cib Connection to the CIB manager * \param[in] promoted_role_only If true, limit to promoted instances * \param[in] force If true, apply only to requested instance * if part of a collective resource * * \return Standard Pacemaker return code (exits on certain failures) */ int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc, const pcmk_node_t *node, const char *move_lifetime, guint timeout_ms, cib_t *cib, gboolean promoted_role_only, gboolean force) { int rc = pcmk_rc_ok; int lpc = 0; int before = 0; guint step_timeout_s = 0; /* @TODO Due to this sleep interval, a timeout <2s will cause problems and * should be rejected */ guint sleep_interval = 2U; guint timeout = pcmk__timeout_ms2s(timeout_ms); bool stop_via_ban = false; char *rsc_id = NULL; char *lookup_id = NULL; char *orig_target_role = NULL; xmlNode *cib_xml_orig = NULL; GList *list_delta = NULL; GList *target_active = NULL; GList *current_active = NULL; GList *restart_target_active = NULL; pcmk_scheduler_t *scheduler = NULL; pcmk_resource_t *parent = uber_parent(rsc); bool running = false; const char *id = pcmk__s(rsc->priv->history_id, rsc->id); const char *host = node ? node->priv->name : NULL; /* If the implicit resource or primitive resource of a bundle is given, operate on the * bundle itself instead. 
*/
    if (pcmk__is_bundled(rsc)) {
        rsc = parent->priv->parent;
    }

    running = resource_is_running_on(rsc, host);

    if (pcmk__is_clone(parent) && !running) {
        if (pcmk__is_unique_clone(parent)) {
            lookup_id = strdup(rsc->id);
        } else {
            lookup_id = clone_strip(rsc->id);
        }
        rsc = parent->priv->fns->find_rsc(parent, lookup_id, node,
                                          pcmk_rsc_match_basename
                                          |pcmk_rsc_match_current_node);
        free(lookup_id);
        running = resource_is_running_on(rsc, host);
    }

    if (!running) {
        if (host) {
            out->err(out, "%s is not running on %s and so cannot be restarted",
                     id, host);
        } else {
            out->err(out,
                     "%s is not running anywhere and so cannot be restarted",
                     id);
        }
        return ENXIO;
    }

    if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
        out->err(out, "Unmanaged resources cannot be restarted.");
        return EAGAIN;
    }

    rsc_id = strdup(rsc->id);

    if (pcmk__is_unique_clone(parent)) {
        lookup_id = strdup(rsc->id);
    } else {
        lookup_id = clone_strip(rsc->id);
    }

    if (host) {
        if (pcmk__is_clone(rsc) || pe_bundle_replicas(rsc)) {
            stop_via_ban = true;
        } else if (pcmk__is_clone(parent)) {
            stop_via_ban = true;
            free(lookup_id);
            lookup_id = strdup(parent->id);
        }
    }

    /*
      grab full cib
      determine originally active resources
      disable or ban
      poll cib and watch for affected resources to get stopped
      without --timeout, calculate the stop timeout for each step and wait for that
      if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
      if everything stopped, re-enable or un-ban
      poll cib and watch for affected resources to get started
      without --timeout, calculate the start timeout for each step and wait for that
      if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
      report success

      Optimizations:
      - use constraints to determine ordered list of affected resources
      - Allow a --no-deps option (aka. --force-restart)
    */

    scheduler = pcmk_new_scheduler();
    if (scheduler == NULL) {
        rc = errno;
        out->err(out, "Could not allocate scheduler data: %s",
                 pcmk_rc_str(rc));
        goto done;
    }

    scheduler->priv->out = out;
    rc = update_dataset(cib, scheduler, &cib_xml_orig, false);
    if(rc != pcmk_rc_ok) {
        out->err(out, "Could not get new resource list: %s (%d)",
                 pcmk_rc_str(rc), rc);
        goto done;
    }

    restart_target_active = get_active_resources(host,
                                                 scheduler->priv->resources);
    current_active = get_active_resources(host, scheduler->priv->resources);

    dump_list(current_active, "Origin");

    if (stop_via_ban) {
        /* Stop the clone or bundle instance by banning it from the host */
        out->quiet = true;
        rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
                              promoted_role_only, PCMK_ROLE_PROMOTED);

    } else {
        xmlNode *xml_search = NULL;

        /* Stop the resource by setting PCMK_META_TARGET_ROLE to Stopped.
         * Remember any existing PCMK_META_TARGET_ROLE so we can restore it
         * later (though it only makes any difference if it's Unpromoted).
*/ rc = find_resource_attr(out, cib, PCMK_XA_VALUE, lookup_id, NULL, NULL, NULL, PCMK_META_TARGET_ROLE, &xml_search); if (rc == pcmk_rc_ok) { - orig_target_role = crm_element_value_copy(xml_search, PCMK_XA_VALUE); + orig_target_role = pcmk__xe_get_copy(xml_search, PCMK_XA_VALUE); } pcmk__xml_free(xml_search); rc = cli_resource_update_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, PCMK_ACTION_STOPPED, FALSE, cib, cib_xml_orig, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not set " PCMK_META_TARGET_ROLE " for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc); if (current_active != NULL) { g_list_free_full(current_active, free); current_active = NULL; } if (restart_target_active != NULL) { g_list_free_full(restart_target_active, free); restart_target_active = NULL; } goto done; } rc = update_dataset(cib, scheduler, &cib_xml_orig, true); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources would be stopped"); goto failure; } target_active = get_active_resources(host, scheduler->priv->resources); dump_list(target_active, "Target"); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta)); display_list(out, list_delta, " * "); step_timeout_s = timeout / sleep_interval; while (list_delta != NULL) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = wait_time_estimate(scheduler, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%us remaining", timeout); } rc = update_dataset(cib, scheduler, &cib_xml_orig, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were stopped"); goto failure; } if (current_active != NULL) { g_list_free_full(current_active, free); } current_active = get_active_resources(host, scheduler->priv->resources); g_list_free(list_delta); list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before); if(before == g_list_length(list_delta)) { /* aborted during stop phase, print the contents of list_delta */ out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); display_list(out, list_delta, " * "); rc = ETIME; goto failure; } } if (stop_via_ban) { rc = cli_resource_clear(lookup_id, host, NULL, cib, true, force); } else if (orig_target_role) { rc = cli_resource_update_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, orig_target_role, FALSE, cib, cib_xml_orig, force); free(orig_target_role); orig_target_role = NULL; } else { rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, cib, cib_xml_orig, force); } if(rc != pcmk_rc_ok) { out->err(out, "Could not unset " PCMK_META_TARGET_ROLE " for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc); goto done; } if (target_active != NULL) { g_list_free_full(target_active, free); } target_active = restart_target_active; list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta)); display_list(out, list_delta, " * "); 
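    /* Start phase (commentary drawn from the code below): mirror the stop
     * phase above, but now wait for the previously active resources to come
     * back. Each pass sleeps sleep_interval seconds, refreshes the scheduler
     * data from the CIB, and recomputes the delta against
     * restart_target_active; if a whole step passes with no progress, the
     * restart is abandoned with ETIME.
     */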
step_timeout_s = timeout / sleep_interval; while (waiting_for_starts(list_delta, rsc, host)) { before = g_list_length(list_delta); if(timeout_ms == 0) { step_timeout_s = wait_time_estimate(scheduler, list_delta) / sleep_interval; } /* We probably don't need the entire step timeout */ for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) { sleep(sleep_interval); if(timeout) { timeout -= sleep_interval; crm_trace("%ds remaining", timeout); } rc = update_dataset(cib, scheduler, &cib_xml_orig, false); if(rc != pcmk_rc_ok) { out->err(out, "Could not determine which resources were started"); goto failure; } /* It's OK if dependent resources moved to a different node, * so we check active resources on all nodes. */ if (current_active != NULL) { g_list_free_full(current_active, free); } current_active = get_active_resources(NULL, scheduler->priv->resources); g_list_free(list_delta); list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp); dump_list(current_active, "Current"); dump_list(list_delta, "Delta"); } if(before == g_list_length(list_delta)) { /* aborted during start phase, print the contents of list_delta */ out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta)); display_list(out, list_delta, " * "); rc = ETIME; goto failure; } } rc = pcmk_rc_ok; goto done; failure: if (stop_via_ban) { cli_resource_clear(lookup_id, host, NULL, cib, true, force); } else if (orig_target_role) { cli_resource_update_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, orig_target_role, FALSE, cib, cib_xml_orig, force); free(orig_target_role); } else { cli_resource_delete_attribute(rsc, rsc_id, NULL, PCMK_XE_META_ATTRIBUTES, NULL, PCMK_META_TARGET_ROLE, cib, cib_xml_orig, force); } done: if (list_delta != NULL) { g_list_free(list_delta); } if (current_active != NULL) { g_list_free_full(current_active, free); } if (target_active != NULL && (target_active != restart_target_active)) { g_list_free_full(target_active, free); } if (restart_target_active != NULL) { g_list_free_full(restart_target_active, free); } free(rsc_id); free(lookup_id); pcmk_free_scheduler(scheduler); return rc; } static inline bool action_is_pending(const pcmk_action_t *action) { if (pcmk_any_flags_set(action->flags, pcmk__action_optional|pcmk__action_pseudo) || !pcmk_is_set(action->flags, pcmk__action_runnable) || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) { return false; } return true; } /*! 
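 * \note Commentary drawn from the implementation: notify actions, optional
 *       actions, pseudo-actions, and unrunnable actions are filtered out by
 *       action_is_pending() above, so only real outstanding work keeps the
 *       wait loop in wait_till_stable() going.
 *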
* \internal * \brief Check whether any actions in a list are pending * * \param[in] actions List of actions to check * * \return true if any actions in the list are pending, otherwise false */ static bool actions_are_pending(const GList *actions) { for (const GList *action = actions; action != NULL; action = action->next) { const pcmk_action_t *a = (const pcmk_action_t *) action->data; if (action_is_pending(a)) { crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags); return true; } } return false; } static void print_pending_actions(pcmk__output_t *out, GList *actions) { GList *action; out->info(out, "Pending actions:"); for (action = actions; action != NULL; action = action->next) { pcmk_action_t *a = (pcmk_action_t *) action->data; if (!action_is_pending(a)) { continue; } if (a->node) { out->info(out, "\tAction %d: %s\ton %s", a->id, a->uuid, pcmk__node_name(a->node)); } else { out->info(out, "\tAction %d: %s", a->id, a->uuid); } } } /* For --wait, timeout (in seconds) to use if caller doesn't specify one */ #define WAIT_DEFAULT_TIMEOUT_S (60 * 60) /* For --wait, how long to sleep between cluster state checks */ #define WAIT_SLEEP_S (2) /*! * \internal * \brief Wait until all pending cluster actions are complete * * This waits until either the CIB's transition graph is idle or a timeout is * reached. * * \param[in,out] out Output object * \param[in] timeout_ms Consider failed if actions do not complete in * this time (specified in milliseconds, but * one-second granularity is actually used; if 0, a * default will be used) * \param[in,out] cib Connection to the CIB manager * * \return Standard Pacemaker return code */ int wait_till_stable(pcmk__output_t *out, guint timeout_ms, cib_t * cib) { // @FIXME This should bail out when run with CIB_file pcmk_scheduler_t *scheduler = NULL; xmlXPathObject *search = NULL; int rc = pcmk_rc_ok; bool pending_unknown_state_resources; time_t expire_time = time(NULL); time_t time_diff; bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet char *xpath = NULL; if (timeout_ms == 0) { expire_time += WAIT_DEFAULT_TIMEOUT_S; } else { expire_time += pcmk__timeout_ms2s(timeout_ms + 999); } scheduler = pcmk_new_scheduler(); if (scheduler == NULL) { return ENOMEM; } xpath = crm_strdup_printf("/" PCMK_XE_CIB "/" PCMK_XE_STATUS "/" PCMK__XE_NODE_STATE "/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES "/" PCMK__XE_LRM_RESOURCE "/" PCMK__XE_LRM_RSC_OP "[@" PCMK__XA_RC_CODE "='%d']", PCMK_OCF_UNKNOWN); do { /* Abort if timeout is reached */ time_diff = expire_time - time(NULL); if (time_diff <= 0) { print_pending_actions(out, scheduler->priv->actions); rc = ETIME; break; } crm_info("Waiting up to %lld seconds for cluster actions to complete", (long long) time_diff); if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */ sleep(WAIT_SLEEP_S); } /* Get latest transition graph */ pcmk_reset_scheduler(scheduler); rc = update_scheduler_input(out, scheduler, cib, NULL); if (rc != pcmk_rc_ok) { break; } pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts); pcmk__schedule_actions(scheduler); if (!printed_version_warning) { /* If the DC has a different version than the local node, the two * could come to different conclusions about what actions need to be * done. Warn the user in this case. * * @TODO A possible long-term solution would be to reimplement the * wait as a new controller operation that would be forwarded to the * DC. However, that would have potential problems of its own. 
*/ const char *dc_version = NULL; dc_version = g_hash_table_lookup(scheduler->priv->options, PCMK_OPT_DC_VERSION); if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) { out->info(out, "warning: wait option may not work properly in " "mixed-version cluster"); printed_version_warning = true; } } search = pcmk__xpath_search(scheduler->input->doc, xpath); pending_unknown_state_resources = (pcmk__xpath_num_results(search) > 0); xmlXPathFreeObject(search); } while (actions_are_pending(scheduler->priv->actions) || pending_unknown_state_resources); pcmk_free_scheduler(scheduler); free(xpath); return rc; } static const char * get_action(const char *rsc_action) { const char *action = NULL; if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) { action = PCMK_ACTION_VALIDATE_ALL; } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) { action = PCMK_ACTION_MONITOR; } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop", "force-demote", "force-promote", NULL)) { action = rsc_action+6; } else { action = rsc_action; } return action; } /*! * \brief Set up environment variables as expected by resource agents * * When the cluster executes resource agents, it adds certain environment * variables (directly or via resource meta-attributes) expected by some * resource agents. Add the essential ones that many resource agents expect, so * the behavior is the same for command-line execution. * * \param[in,out] params Resource parameters that will be passed to agent * \param[in] timeout_ms Action timeout (in milliseconds) * \param[in] check_level OCF check level * \param[in] verbosity Verbosity level */ static void set_agent_environment(GHashTable *params, guint timeout_ms, int check_level, int verbosity) { g_hash_table_insert(params, crm_meta_name(PCMK_META_TIMEOUT), crm_strdup_printf("%u", timeout_ms)); pcmk__insert_dup(params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); if (check_level >= 0) { char *level = crm_strdup_printf("%d", check_level); setenv("OCF_CHECK_LEVEL", level, 1); free(level); } pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true); if (verbosity > 1) { setenv("OCF_TRACE_RA", "1", 1); } /* A resource agent using the standard ocf-shellfuncs library will not print * messages to stderr if it doesn't have a controlling terminal (e.g. if * crm_resource is called via script or ssh). This forces it to do so. */ setenv("OCF_TRACE_FILE", "/dev/stderr", 0); } /*! * \internal * \brief Apply command-line overrides to resource parameters * * \param[in,out] params Parameters to be passed to agent * \param[in] overrides Parameters to override (or NULL if none) */ static void apply_overrides(GHashTable *params, GHashTable *overrides) { if (overrides != NULL) { GHashTableIter iter; char *name = NULL; char *value = NULL; g_hash_table_iter_init(&iter, overrides); while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) { pcmk__insert_dup(params, name, value); } } } /* Takes ownership of params. * Does not modify override_hash or its contents. 
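 * (Commentary drawn from the code below: a timeout_ms of 0 falls back to
 * PCMK_DEFAULT_ACTION_TIMEOUT_MS, the same default the cluster applies.)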
*/ crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name, const char *rsc_class, const char *rsc_prov, const char *rsc_type, const char *rsc_action, GHashTable *params, GHashTable *override_hash, guint timeout_ms, int resource_verbose, gboolean force, int check_level) { const char *class = rsc_class; const char *action = get_action(rsc_action); crm_exit_t exit_code = CRM_EX_OK; svc_action_t *op = NULL; // If no timeout was provided, use the same default as the cluster if (timeout_ms == 0U) { timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS; } set_agent_environment(params, timeout_ms, check_level, resource_verbose); apply_overrides(params, override_hash); // services__create_resource_action() takes ownership of params on success op = services__create_resource_action(rsc_name? rsc_name : "test", rsc_class, rsc_prov, rsc_type, action, 0, QB_MIN(timeout_ms, INT_MAX), params, 0); if (op == NULL) { out->err(out, "Could not execute %s using %s%s%s:%s: %s", action, rsc_class, (rsc_prov? ":" : ""), (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM)); g_hash_table_destroy(params); return CRM_EX_OSERR; } #if PCMK__ENABLE_SERVICE if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) { class = resources_find_service_class(rsc_type); } #endif if (!pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_cli_exec)) { services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR, "Manual execution of the %s standard is " "unsupported", pcmk__s(class, "unspecified")); } if (op->rc != PCMK_OCF_UNKNOWN) { exit_code = op->rc; goto done; } services_action_sync(op); // Map results to OCF codes for consistent reporting to user { enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc); // Cast variable instead of function return to keep compilers happy exit_code = (crm_exit_t) ocf_code; } done: out->message(out, "resource-agent-action", resource_verbose, rsc_class, rsc_prov, rsc_type, rsc_name, rsc_action, override_hash, exit_code, op->status, services__exit_reason(op), op->stdout_data, op->stderr_data); services_action_free(op); return exit_code; } /*! 
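 * \note Commentary drawn from the implementation: when no timeout is
 *       configured for the action, or the configured value is not a positive
 *       number, this falls back to PCMK_DEFAULT_ACTION_TIMEOUT_MS.
 *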
* \internal * \brief Get the timeout the cluster would use for an action * * \param[in] rsc Resource that action is for * \param[in] action Name of action */ static guint get_action_timeout(pcmk_resource_t *rsc, const char *action) { long long timeout_ms = -1LL; xmlNode *op = pcmk__find_action_config(rsc, action, 0, true); GHashTable *meta = pcmk__unpack_action_meta(rsc, NULL, action, 0, op); if ((pcmk__scan_ll(g_hash_table_lookup(meta, PCMK_META_TIMEOUT), &timeout_ms, -1LL) != pcmk_rc_ok) || (timeout_ms <= 0LL)) { timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS; } g_hash_table_destroy(meta); return (guint) QB_MIN(timeout_ms, UINT_MAX); } // Does not modify override_hash or its contents crm_exit_t cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name, const char *rsc_action, GHashTable *override_hash, guint timeout_ms, cib_t *cib, int resource_verbose, bool force, int check_level) { pcmk_scheduler_t *scheduler = NULL; pcmk__output_t *out = NULL; crm_exit_t exit_code = CRM_EX_OK; const char *rid = requested_name; const char *rtype = NULL; const char *rprov = NULL; const char *rclass = NULL; GHashTable *params = NULL; pcmk__assert(rsc != NULL); scheduler = rsc->priv->scheduler; out = scheduler->priv->out; if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote", "force-promote", NULL)) { if (pcmk__is_clone(rsc)) { GList *nodes = cli_resource_search(rsc, requested_name); if(nodes != NULL && force == FALSE) { out->err(out, "It is not safe to %s %s here: the cluster claims it is already active", rsc_action, rsc->id); out->err(out, "Try setting " PCMK_META_TARGET_ROLE "=" PCMK_ROLE_STOPPED " first or specifying the force option"); return CRM_EX_UNSAFE; } g_list_free_full(nodes, free); } } if (pcmk__is_clone(rsc)) { /* Grab the first child resource in the hope it's not a group */ rsc = rsc->priv->children->data; } if (pcmk__is_group(rsc)) { out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action); return CRM_EX_UNIMPLEMENT_FEATURE; } else if (pcmk__is_bundled(rsc)) { out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action); return CRM_EX_UNIMPLEMENT_FEATURE; } rclass = pcmk__xe_get(rsc->priv->xml, PCMK_XA_CLASS); rprov = pcmk__xe_get(rsc->priv->xml, PCMK_XA_PROVIDER); rtype = pcmk__xe_get(rsc->priv->xml, PCMK_XA_TYPE); params = generate_resource_params(rsc); // @TODO use local node if (timeout_ms == 0U) { timeout_ms = get_action_timeout(rsc, get_action(rsc_action)); } if (!pcmk__is_anonymous_clone(rsc->priv->parent)) { rid = rsc->id; } exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action, params, override_hash, timeout_ms, resource_verbose, force, check_level); return exit_code; } // \return Standard Pacemaker return code int cli_resource_move(pcmk_resource_t *rsc, const char *rsc_id, const pcmk_node_t *dest, const char *move_lifetime, cib_t *cib, bool promoted_role_only, bool force) { pcmk_scheduler_t *scheduler = NULL; pcmk__output_t *out = NULL; int rc = pcmk_rc_ok; unsigned int count = 0; pcmk_node_t *current = NULL; bool cur_is_dest = false; const char *active_s = promoted_role_only? "promoted" : "active"; pcmk__assert((rsc != NULL) && (dest != NULL)); scheduler = rsc->priv->scheduler; out = scheduler->priv->out; if (promoted_role_only && !pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { pcmk_resource_t *p = uber_parent(rsc); if (pcmk_is_set(p->flags, pcmk__rsc_promotable)) { /* @TODO This is dead code. 
If rsc is part of a promotable clone, * then it has the pcmk__rsc_promotable flag set. * * This was added by 36e4b490. Prior to that commit, we were * checking whether rsc itself is the promotable clone, and if not, * trying to get a promotable clone ancestor. * * As of that commit, we check whether rsc has the promotable flag * set. But if it has a promotable clone ancestor, that flag is set. * * Question: Should we drop this block and use rsc for the move, or * should we check whether rsc is a clone instead of only checking * whether the promotable flag is set (as we did prior to 36e4b490)? * The latter seems appropriate, especially considering the block * below with promoted_count and promoted_node; but we need to trace * and test. */ out->info(out, "Using parent '%s' for move instead of '%s'", rsc->id, rsc_id); rsc_id = p->id; rsc = p; } else { out->info(out, "Ignoring --promoted option: %s is not promotable", rsc_id); promoted_role_only = false; } } current = pe__find_active_requires(rsc, &count); if (pcmk_is_set(rsc->flags, pcmk__rsc_promotable)) { unsigned int promoted_count = 0; pcmk_node_t *promoted_node = NULL; for (GList *iter = rsc->priv->children; iter != NULL; iter = iter->next) { pcmk_resource_t *child = iter->data; enum rsc_role_e child_role = child->priv->fns->state(child, true); if (child_role == pcmk_role_promoted) { rsc = child; promoted_node = pcmk__current_node(child); promoted_count++; } } if (promoted_role_only || (promoted_count != 0)) { count = promoted_count; current = promoted_node; } } if (count > 1) { if (!pcmk__is_clone(rsc)) { return pcmk_rc_multiple; } current = NULL; } if (pcmk__same_node(current, dest)) { if (!force) { return pcmk_rc_already; } cur_is_dest = true; crm_info("%s is already %s on %s, reinforcing placement with location " "constraint", rsc_id, active_s, pcmk__node_name(dest)); } /* @TODO The constraint changes in the following commands should done * atomically in a single CIB transaction, to avoid the possibility of * multiple moves */ /* Clear any previous prefer constraints across all nodes. */ cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, false, force); /* Clear any previous ban constraints on 'dest'. */ cli_resource_clear(rsc_id, dest->priv->name, scheduler->nodes, cib, true, force); /* Record an explicit preference for 'dest' */ rc = cli_resource_prefer(out, rsc_id, dest->priv->name, move_lifetime, cib, promoted_role_only, PCMK_ROLE_PROMOTED); crm_trace("%s%s now prefers %s%s", rsc->id, (promoted_role_only? " (promoted)" : ""), pcmk__node_name(dest), (force? " (forced)" : "")); /* Ban the current location if force is set and the current location is not * the destination. It is possible to use move to enforce a location without * regard for where the resource is currently located. */ if (force && !cur_is_dest) { /* Ban the original location if possible */ if (current != NULL) { cli_resource_ban(out, rsc_id, current->priv->name, move_lifetime, cib, promoted_role_only, PCMK_ROLE_PROMOTED); } else if (count > 1) { out->info(out, "Resource '%s' is currently %s in %u locations. " "One may now move to %s", rsc_id, active_s, count, pcmk__node_name(dest)); out->info(out, "To prevent '%s' from being %s at a specific location, " "specify a node", rsc_id, active_s); } else { crm_trace("Not banning %s from its current location: not active", rsc_id); } } return rc; }
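
/* Illustrative sketch (not part of the upstream file): a hypothetical caller
 * that combines two of the helpers above, moving a resource and then waiting
 * for the resulting transition to settle. The variables rsc, dest_node, cib,
 * and out are assumed to be provided by the usual crm_resource plumbing, and
 * error handling is intentionally minimal.
 *
 *     int rc = cli_resource_move(rsc, rsc->id, dest_node, NULL, cib,
 *                                false, false);
 *
 *     if (rc == pcmk_rc_ok) {
 *         // 0 means "use the default --wait timeout" (one hour)
 *         rc = wait_till_stable(out, 0, cib);
 *     }
 */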