diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c index 6acdfb0937..79e496ae85 100644 --- a/daemons/attrd/attrd_corosync.c +++ b/daemons/attrd/attrd_corosync.c @@ -1,564 +1,582 @@ /* * Copyright 2013-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" extern crm_exit_t attrd_exit_status; static void attrd_peer_message(crm_node_t *peer, xmlNode *xml) { const char *election_op = crm_element_value(xml, F_CRM_TASK); if (election_op) { attrd_handle_election_op(peer, xml); return; } if (attrd_shutting_down()) { /* If we're shutting down, we want to continue responding to election * ops as long as we're a cluster member (because our vote may be * needed). Ignore all other messages. */ return; } else { pcmk__request_t request = { .ipc_client = NULL, .ipc_id = 0, .ipc_flags = 0, .peer = peer->uname, .xml = xml, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; request.op = crm_element_value_copy(request.xml, PCMK__XA_TASK); CRM_CHECK(request.op != NULL, return); attrd_handle_request(&request); pcmk__reset_request(&request); } } static void attrd_cpg_dispatch(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { uint32_t kind = 0; xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from); if(data == NULL) { return; } if (kind == crm_class_cluster) { xml = string2xml(data); } if (xml == NULL) { crm_err("Bad message of class %d received from %s[%u]: '%.120s'", kind, from, nodeid, data); } else { crm_node_t *peer = crm_get_peer(nodeid, from); attrd_peer_message(peer, xml); } free_xml(xml); free(data); } static void attrd_cpg_destroy(gpointer unused) { if (attrd_shutting_down()) { crm_info("Corosync disconnection complete"); } else { crm_crit("Lost connection to cluster layer, shutting down"); attrd_exit_status = CRM_EX_DISCONNECT; attrd_shutdown(0); } } /*! * \internal * \brief Override an attribute sync with a local value * * Broadcast the local node's value for an attribute that's different from the * value provided in a peer's attribute synchronization response. This ensures a * node's values for itself take precedence and all peers are kept in sync. * * \param[in] a Attribute entry to override * * \return Local instance of attribute value */ static attribute_value_t * broadcast_local_value(const attribute_t *a) { attribute_value_t *v = g_hash_table_lookup(a->values, attrd_cluster->uname); xmlNode *sync = create_xml_node(NULL, __func__); crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); attrd_add_value_xml(sync, a, v, false); attrd_send_message(NULL, sync); free_xml(sync); return v; } /*! * \internal * \brief Ensure a Pacemaker Remote node is in the correct peer cache * * \param[in] node_name Name of Pacemaker Remote node to check */ static void cache_remote_node(const char *node_name) { /* If we previously assumed this node was an unseen cluster node, * remove its entry from the cluster peer cache. 
*/ crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name); if (dup && (dup->uuid == NULL)) { reap_crm_member(0, node_name); } // Ensure node is in the remote peer cache CRM_ASSERT(crm_remote_peer_get(node_name) != NULL); } #define state_text(state) pcmk__s((state), "in unknown state") /*! * \internal * \brief Return host's hash table entry (creating one if needed) * * \param[in,out] values Hash table of values * \param[in] host Name of peer to look up * \param[in] xml XML describing the attribute * * \return Pointer to new or existing hash table entry */ static attribute_value_t * attrd_lookup_or_create_value(GHashTable *values, const char *host, const xmlNode *xml) { attribute_value_t *v = g_hash_table_lookup(values, host); int is_remote = 0; crm_element_value_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote); if (is_remote) { cache_remote_node(host); } if (v == NULL) { v = calloc(1, sizeof(attribute_value_t)); CRM_ASSERT(v != NULL); pcmk__str_update(&v->nodename, host); v->is_remote = is_remote; g_hash_table_replace(values, v->nodename, v); } return(v); } static void attrd_peer_change_cb(enum crm_status_type kind, crm_node_t *peer, const void *data) { bool gone = false; bool is_remote = pcmk_is_set(peer->flags, crm_remote_node); switch (kind) { case crm_status_uname: crm_debug("%s node %s is now %s", (is_remote? "Remote" : "Cluster"), peer->uname, state_text(peer->state)); break; case crm_status_processes: if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) { gone = true; } crm_debug("Node %s is %s a peer", peer->uname, (gone? "no longer" : "now")); break; case crm_status_nstate: crm_debug("%s node %s is now %s (was %s)", (is_remote? "Remote" : "Cluster"), peer->uname, state_text(peer->state), state_text(data)); if (pcmk__str_eq(peer->state, CRM_NODE_MEMBER, pcmk__str_casei)) { /* If we're the writer, send new peers a list of all attributes * (unless it's a remote node, which doesn't run its own attrd) */ if (attrd_election_won() && !pcmk_is_set(peer->flags, crm_remote_node)) { attrd_peer_sync(peer, NULL); } } else { // Remove all attribute values associated with lost nodes attrd_peer_remove(peer->uname, false, "loss"); gone = true; } break; } // Remove votes from cluster nodes that leave, in case election in progress if (gone && !is_remote) { attrd_remove_voter(peer); // Ensure remote nodes that come up are in the remote node cache } else if (!gone && is_remote) { cache_remote_node(peer->uname); } } static void record_peer_nodeid(attribute_value_t *v, const char *host) { crm_node_t *known_peer = crm_get_peer(v->nodeid, host); crm_trace("Learned %s has node id %s", known_peer->uname, known_peer->uuid); if (attrd_election_won()) { attrd_write_attributes(false, false); } } static void update_attr_on_host(attribute_t *a, const crm_node_t *peer, const xmlNode *xml, const char *attr, const char *value, const char *host, bool filter, int is_force_write) { attribute_value_t *v = NULL; v = attrd_lookup_or_create_value(a->values, host, xml); if (filter && !pcmk__str_eq(v->current, value, pcmk__str_casei) && pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei)) { crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s", attr, host, v->current, value, peer->uname); v = broadcast_local_value(a); } else if (!pcmk__str_eq(v->current, value, pcmk__str_casei)) { crm_notice("Setting %s[%s]: %s -> %s " CRM_XS " from %s with %s write delay", attr, host, pcmk__s(v->current, "(unset)"), pcmk__s(value, "(unset)"), peer->uname, (a->timeout_ms == 0)? 
"no" : pcmk__readable_interval(a->timeout_ms)); pcmk__str_update(&v->current, value); a->changed = true; if (pcmk__str_eq(host, attrd_cluster->uname, pcmk__str_casei) && pcmk__str_eq(attr, XML_CIB_ATTR_SHUTDOWN, pcmk__str_none)) { if (!pcmk__str_eq(value, "0", pcmk__str_null_matches)) { attrd_set_requesting_shutdown(); } else { attrd_clear_requesting_shutdown(); } } // Write out new value or start dampening timer if (a->timeout_ms && a->timer) { crm_trace("Delayed write out (%dms) for %s", a->timeout_ms, attr); mainloop_timer_start(a->timer); } else { attrd_write_or_elect_attribute(a); } } else { if (is_force_write == 1 && a->timeout_ms && a->timer) { /* Save forced writing and set change flag. */ /* The actual attribute is written by Writer after election. */ crm_trace("Unchanged %s[%s] from %s is %s(Set the forced write flag)", attr, host, peer->uname, value); a->force_write = TRUE; } else { crm_trace("Unchanged %s[%s] from %s is %s", attr, host, peer->uname, value); } } /* Set the seen flag for attribute processing held only in the own node. */ v->seen = TRUE; /* If this is a cluster node whose node ID we are learning, remember it */ if ((v->nodeid == 0) && (v->is_remote == FALSE) && (crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, (int*)&v->nodeid) == 0) && (v->nodeid > 0)) { record_peer_nodeid(v, host); } } static void attrd_peer_update_one(const crm_node_t *peer, xmlNode *xml, bool filter) { attribute_t *a = NULL; const char *attr = crm_element_value(xml, PCMK__XA_ATTR_NAME); const char *value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); int is_force_write = 0; if (attr == NULL) { crm_warn("Could not update attribute: peer did not specify name"); return; } crm_element_value_int(xml, PCMK__XA_ATTR_FORCE, &is_force_write); a = attrd_populate_attribute(xml, attr); if (a == NULL) { return; } if (host == NULL) { // If no host was specified, update all hosts GHashTableIter vIter; crm_debug("Setting %s for all hosts to %s", attr, value); xml_remove_prop(xml, PCMK__XA_ATTR_NODE_ID); g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, (gpointer *) & host, NULL)) { update_attr_on_host(a, peer, xml, attr, value, host, filter, is_force_write); } } else { // Update attribute value for the given host update_attr_on_host(a, peer, xml, attr, value, host, filter, is_force_write); } /* If this is a message from some attrd instance broadcasting its protocol * version, check to see if it's a new minimum version. 
*/ if (pcmk__str_eq(attr, CRM_ATTR_PROTOCOL, pcmk__str_none)) { attrd_update_minimum_protocol_ver(value); } } static void broadcast_unseen_local_values(void) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = NULL; g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { if (!(v->seen) && pcmk__str_eq(v->nodename, attrd_cluster->uname, pcmk__str_casei)) { if (sync == NULL) { sync = create_xml_node(NULL, __func__); crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); } attrd_add_value_xml(sync, a, v, a->timeout_ms && a->timer); } } } if (sync != NULL) { crm_debug("Broadcasting local-only values"); attrd_send_message(NULL, sync); free_xml(sync); } } int attrd_cluster_connect(void) { attrd_cluster = calloc(1, sizeof(crm_cluster_t)); attrd_cluster->destroy = attrd_cpg_destroy; attrd_cluster->cpg.cpg_deliver_fn = attrd_cpg_dispatch; attrd_cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership; crm_set_status_callback(&attrd_peer_change_cb); if (crm_cluster_connect(attrd_cluster) == FALSE) { crm_err("Cluster connection failed"); return -ENOTCONN; } return pcmk_ok; } void attrd_peer_clear_failure(pcmk__request_t *request) { xmlNode *xml = request->xml; const char *rsc = crm_element_value(xml, PCMK__XA_ATTR_RESOURCE); const char *host = crm_element_value(xml, PCMK__XA_ATTR_NODE_NAME); const char *op = crm_element_value(xml, PCMK__XA_ATTR_OPERATION); const char *interval_spec = crm_element_value(xml, PCMK__XA_ATTR_INTERVAL); guint interval_ms = crm_parse_interval_spec(interval_spec); char *attr = NULL; GHashTableIter iter; regex_t regex; crm_node_t *peer = crm_get_peer(0, request->peer); if (attrd_failure_regex(®ex, rsc, op, interval_ms) != pcmk_ok) { crm_info("Ignoring invalid request to clear failures for %s", pcmk__s(rsc, "all resources")); return; } crm_xml_add(xml, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE); /* Make sure value is not set, so we delete */ if (crm_element_value(xml, PCMK__XA_ATTR_VALUE)) { crm_xml_replace(xml, PCMK__XA_ATTR_VALUE, NULL); } g_hash_table_iter_init(&iter, attributes); while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) { if (regexec(®ex, attr, 0, NULL, 0) == 0) { crm_trace("Matched %s when clearing %s", attr, pcmk__s(rsc, "all resources")); crm_xml_add(xml, PCMK__XA_ATTR_NAME, attr); attrd_peer_update(peer, xml, host, false); } } regfree(®ex); } /*! * \internal * \brief Load attributes from a peer sync response * * \param[in] peer Peer that sent clear request * \param[in] peer_won Whether peer is the attribute writer * \param[in,out] xml Request XML */ void attrd_peer_sync_response(const crm_node_t *peer, bool peer_won, xmlNode *xml) { crm_info("Processing " PCMK__ATTRD_CMD_SYNC_RESPONSE " from %s", peer->uname); if (peer_won) { /* Initialize the "seen" flag for all attributes to cleared, so we can * detect attributes that local node has but the writer doesn't. */ attrd_clear_value_seen(); } // Process each attribute update in the sync response for (xmlNode *child = pcmk__xml_first_child(xml); child != NULL; child = pcmk__xml_next(child)) { attrd_peer_update(peer, child, crm_element_value(child, PCMK__XA_ATTR_NODE_NAME), true); } if (peer_won) { /* If any attributes are still not marked as seen, the writer doesn't * know about them, so send all peers an update with them. */ broadcast_unseen_local_values(); } } /*! 
* \internal * \brief Remove all attributes and optionally peer cache entries for a node * * \param[in] host Name of node to purge * \param[in] uncache If true, remove node from peer caches * \param[in] source Who requested removal (only used for logging) */ void attrd_peer_remove(const char *host, bool uncache, const char *source) { attribute_t *a = NULL; GHashTableIter aIter; CRM_CHECK(host != NULL, return); crm_notice("Removing all %s attributes for peer %s", host, source); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { if(g_hash_table_remove(a->values, host)) { crm_debug("Removed %s[%s] for peer %s", a->id, host, source); } } if (uncache) { crm_remote_peer_cache_remove(host); reap_crm_member(0, host); } } void attrd_peer_sync(crm_node_t *peer, xmlNode *xml) { GHashTableIter aIter; GHashTableIter vIter; attribute_t *a = NULL; attribute_value_t *v = NULL; xmlNode *sync = create_xml_node(NULL, __func__); crm_xml_add(sync, PCMK__XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE); g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) { g_hash_table_iter_init(&vIter, a->values); while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) { crm_debug("Syncing %s[%s] = %s to %s", a->id, v->nodename, v->current, peer?peer->uname:"everyone"); attrd_add_value_xml(sync, a, v, false); } } crm_debug("Syncing values to %s", peer?peer->uname:"everyone"); attrd_send_message(peer, sync); free_xml(sync); } void attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host, bool filter) { + bool handle_sync_point = false; + if (xml_has_children(xml)) { for (xmlNode *child = first_named_child(xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { /* Set the node name on the child message, assuming it isn't already. */ if (crm_element_value(child, PCMK__XA_ATTR_NODE_NAME) == NULL) { pcmk__xe_add_node(xml, host, 0); } attrd_peer_update_one(peer, child, filter); + + if (attrd_request_has_sync_point(child)) { + handle_sync_point = true; + } } } else { attrd_peer_update_one(peer, xml, filter); + + if (attrd_request_has_sync_point(xml)) { + handle_sync_point = true; + } + } + + /* If the update XML specified that the client wanted to wait for a sync + * point, process that now. + */ + if (handle_sync_point) { + crm_debug("Hit local sync point for attribute update"); + attrd_ack_waitlist_clients(attrd_sync_point_local, xml); } } diff --git a/daemons/attrd/attrd_messages.c b/daemons/attrd/attrd_messages.c index 9e8ae4084e..c96700f004 100644 --- a/daemons/attrd/attrd_messages.c +++ b/daemons/attrd/attrd_messages.c @@ -1,265 +1,275 @@ /* * Copyright 2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include "pacemaker-attrd.h" int minimum_protocol_version = -1; static GHashTable *attrd_handlers = NULL; static xmlNode * handle_unknown_request(pcmk__request_t *request) { crm_err("Unknown IPC request %s from %s %s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request)); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown request type '%s' (bug?)", request->op); return NULL; } static xmlNode * handle_clear_failure_request(pcmk__request_t *request) { if (request->peer != NULL) { /* It is not currently possible to receive this as a peer command, * but will be, if we one day enable propagating this operation. */ attrd_peer_clear_failure(request); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { return attrd_client_clear_failure(request); } } static xmlNode * handle_flush_request(pcmk__request_t *request) { if (request->peer != NULL) { /* Ignore. The flush command was removed in 2.0.0 but may be * received from peers running older versions. */ pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { return handle_unknown_request(request); } } static xmlNode * handle_query_request(pcmk__request_t *request) { if (request->peer != NULL) { return handle_unknown_request(request); } else { return attrd_client_query(request); } } static xmlNode * handle_remove_request(pcmk__request_t *request) { if (request->peer != NULL) { const char *host = crm_element_value(request->xml, PCMK__XA_ATTR_NODE_NAME); attrd_peer_remove(host, true, request->peer); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { return attrd_client_peer_remove(request); } } static xmlNode * handle_refresh_request(pcmk__request_t *request) { if (request->peer != NULL) { return handle_unknown_request(request); } else { return attrd_client_refresh(request); } } static xmlNode * handle_sync_request(pcmk__request_t *request) { if (request->peer != NULL) { crm_node_t *peer = crm_get_peer(0, request->peer); attrd_peer_sync(peer, request->xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { return handle_unknown_request(request); } } static xmlNode * handle_sync_response_request(pcmk__request_t *request) { if (request->ipc_client != NULL) { return handle_unknown_request(request); } else { if (request->peer != NULL) { crm_node_t *peer = crm_get_peer(0, request->peer); bool peer_won = attrd_check_for_new_writer(peer, request->xml); if (!pcmk__str_eq(peer->uname, attrd_cluster->uname, pcmk__str_casei)) { attrd_peer_sync_response(peer, peer_won, request->xml); } } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } } static xmlNode * handle_update_request(pcmk__request_t *request) { if (request->peer != NULL) { const char *host = crm_element_value(request->xml, PCMK__XA_ATTR_NODE_NAME); crm_node_t *peer = crm_get_peer(0, request->peer); attrd_peer_update(peer, request->xml, host, false); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { - if (!attrd_request_has_sync_point(request->xml)) { + if (attrd_request_has_sync_point(request->xml)) { + /* If this client supplied a sync point it wants to wait for, add it to + * the wait list. Clients on this list will not receive an ACK until + * their sync point is hit which will result in the client stalled there + * until it receives a response. 
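(Aside: from the client's point of view, the branch described in this comment means an update that names a sync point is not ACKed at submission time; the ACK arrives only once attrd_ack_waitlist_clients() fires for that sync point. Below is a sketch of the kind of update XML this handler inspects, built with the helpers and constants used in this patch; the example_ function is invented for illustration, and the wire names are whatever the PCMK__XA_* constants expand to.)

#include "pacemaker-attrd.h"    /* assumed to pull in the XML helpers and PCMK__* constants */

/* Sketch (not part of this patch) of an update request that asks for the
 * "local" sync point.
 */
static xmlNode *
example_update_with_sync_point(const char *node, const char *name,
                               const char *value)
{
    xmlNode *xml = create_xml_node(NULL, __func__);

    crm_xml_add(xml, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE);
    crm_xml_add(xml, PCMK__XA_ATTR_NAME, name);
    crm_xml_add(xml, PCMK__XA_ATTR_VALUE, value);
    pcmk__xe_add_node(xml, node, 0);

    /* With this attribute present, handle_update_request() defers the ACK
     * until the local sync point is reached; without it, the ACK is sent
     * immediately as before.
     */
    crm_xml_add(xml, PCMK__XA_ATTR_SYNC_POINT, PCMK__VALUE_LOCAL);
    return xml;
}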
+ * + * All other clients will receive the expected response as normal. + */ + attrd_add_client_to_waitlist(request); + + } else { /* If the client doesn't want to wait for a sync point, go ahead and send * the ACK immediately. Otherwise, we'll send the ACK when the appropriate * sync point is reached. * * In the normal case, attrd_client_update can be called recursively which * makes where to send the ACK tricky. Doing it here ensures the client * only ever receives one. */ attrd_send_ack(request->ipc_client, request->ipc_id, request->flags|crm_ipc_client_response); } return attrd_client_update(request); } } static void attrd_register_handlers(void) { pcmk__server_command_t handlers[] = { { PCMK__ATTRD_CMD_CLEAR_FAILURE, handle_clear_failure_request }, { PCMK__ATTRD_CMD_FLUSH, handle_flush_request }, { PCMK__ATTRD_CMD_PEER_REMOVE, handle_remove_request }, { PCMK__ATTRD_CMD_QUERY, handle_query_request }, { PCMK__ATTRD_CMD_REFRESH, handle_refresh_request }, { PCMK__ATTRD_CMD_SYNC, handle_sync_request }, { PCMK__ATTRD_CMD_SYNC_RESPONSE, handle_sync_response_request }, { PCMK__ATTRD_CMD_UPDATE, handle_update_request }, { PCMK__ATTRD_CMD_UPDATE_DELAY, handle_update_request }, { PCMK__ATTRD_CMD_UPDATE_BOTH, handle_update_request }, { NULL, handle_unknown_request }, }; attrd_handlers = pcmk__register_handlers(handlers); } void attrd_unregister_handlers(void) { if (attrd_handlers != NULL) { g_hash_table_destroy(attrd_handlers); attrd_handlers = NULL; } } void attrd_handle_request(pcmk__request_t *request) { xmlNode *reply = NULL; char *log_msg = NULL; const char *reason = NULL; if (attrd_handlers == NULL) { attrd_register_handlers(); } reply = pcmk__process_request(request, attrd_handlers); if (reply != NULL) { crm_log_xml_trace(reply, "Reply"); if (request->ipc_client != NULL) { pcmk__ipc_send_xml(request->ipc_client, request->ipc_id, reply, request->ipc_flags); } else { crm_err("Not sending CPG reply to client"); } free_xml(reply); } reason = request->result.exit_reason; log_msg = crm_strdup_printf("Processed %s request from %s %s: %s%s%s%s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request), pcmk_exec_status_str(request->result.execution_status), (reason == NULL)? "" : " (", pcmk__s(reason, ""), (reason == NULL)? "" : ")"); if (!pcmk__result_ok(&request->result)) { crm_warn("%s", log_msg); } else { crm_debug("%s", log_msg); } free(log_msg); pcmk__reset_request(request); } /*! 
 \internal
 \brief Broadcast private attribute for local node with protocol version
 */
void
attrd_broadcast_protocol(void)
{
    xmlNode *attrd_op = create_xml_node(NULL, __func__);

    crm_xml_add(attrd_op, F_TYPE, T_ATTRD);
    crm_xml_add(attrd_op, F_ORIG, crm_system_name);
    crm_xml_add(attrd_op, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE);
    crm_xml_add(attrd_op, PCMK__XA_ATTR_NAME, CRM_ATTR_PROTOCOL);
    crm_xml_add(attrd_op, PCMK__XA_ATTR_VALUE, ATTRD_PROTOCOL_VERSION);
    crm_xml_add_int(attrd_op, PCMK__XA_ATTR_IS_PRIVATE, 1);
    pcmk__xe_add_node(attrd_op, attrd_cluster->uname, attrd_cluster->nodeid);

    crm_debug("Broadcasting attrd protocol version %s for node %s",
              ATTRD_PROTOCOL_VERSION, attrd_cluster->uname);

    attrd_send_message(NULL, attrd_op); /* ends up at attrd_peer_message() */

    free_xml(attrd_op);
}

gboolean
attrd_send_message(crm_node_t * node, xmlNode * data)
{
    crm_xml_add(data, F_TYPE, T_ATTRD);
    crm_xml_add(data, PCMK__XA_ATTR_VERSION, ATTRD_PROTOCOL_VERSION);
    attrd_xml_add_writer(data);
    return send_cluster_message(node, crm_msg_attrd, data, TRUE);
}
diff --git a/daemons/attrd/attrd_sync.c b/daemons/attrd/attrd_sync.c
index 92759d2267..2981bd01b3 100644
--- a/daemons/attrd/attrd_sync.c
+++ b/daemons/attrd/attrd_sync.c
@@ -1,38 +1,175 @@
/*
 * Copyright 2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include
#include
#include
#include "pacemaker-attrd.h"

+/* A hash table storing clients that are waiting on a sync point to be reached.
+ * The key is waitlist_client - just a plain int. The obvious key would be
+ * the IPC client's ID, but this is not guaranteed to be unique. A single client
+ * could be waiting on a sync point for multiple attributes at the same time.
+ *
+ * It is not expected that this hash table will ever be especially large.
+ */
+static GHashTable *waitlist = NULL;
+static int waitlist_client = 0;
+
+struct waitlist_node {
+    /* What kind of sync point does this node describe? */
+    enum attrd_sync_point sync_point;
+
+    /* Information required to construct and send a reply to the client.
+     */
+    char *client_id;
+    uint32_t ipc_id;
+    uint32_t flags;
+};
+
+static void
+next_key(void)
+{
+    do {
+        waitlist_client++;
+        if (waitlist_client < 0) {
+            waitlist_client = 1;
+        }
+    } while (g_hash_table_contains(waitlist, GINT_TO_POINTER(waitlist_client)));
+}
+
+static void
+free_waitlist_node(gpointer data)
+{
+    struct waitlist_node *wl = (struct waitlist_node *) data;
+
+    free(wl->client_id);
+    free(wl);
+}
+
+static const char *
+sync_point_str(enum attrd_sync_point sync_point)
+{
+    if (sync_point == attrd_sync_point_local) {
+        return PCMK__VALUE_LOCAL;
+    } else if (sync_point == attrd_sync_point_cluster) {
+        return PCMK__VALUE_CLUSTER;
+    } else {
+        return "unknown";
+    }
+}
+
+void
+attrd_add_client_to_waitlist(pcmk__request_t *request)
+{
+    const char *sync_point = attrd_request_sync_point(request->xml);
+    struct waitlist_node *wl = NULL;
+
+    if (sync_point == NULL) {
+        return;
+    }
+
+    if (waitlist == NULL) {
+        waitlist = pcmk__intkey_table(free_waitlist_node);
+    }
+
+    wl = calloc(sizeof(struct waitlist_node), 1);
+
+    CRM_ASSERT(wl != NULL);
+
+    wl->client_id = strdup(request->ipc_client->id);
+
+    CRM_ASSERT(wl->client_id);
+
+    if (pcmk__str_eq(sync_point, PCMK__VALUE_LOCAL, pcmk__str_none)) {
+        wl->sync_point = attrd_sync_point_local;
+    } else if (pcmk__str_eq(sync_point, PCMK__VALUE_CLUSTER, pcmk__str_none)) {
+        wl->sync_point = attrd_sync_point_cluster;
+    } else {
+        free_waitlist_node(wl);
+        return;
+    }
+
+    wl->ipc_id = request->ipc_id;
+    wl->flags = request->flags;
+
+    crm_debug("Added client %s to waitlist for %s sync point",
+              wl->client_id, sync_point_str(wl->sync_point));
+
+    next_key();
+    pcmk__intkey_table_insert(waitlist, waitlist_client, wl);
+
+    /* And then add the key to the request XML so we can uniquely identify
+     * it when it comes time to issue the ACK.
+     */
+    crm_xml_add_int(request->xml, XML_LRM_ATTR_CALLID, waitlist_client);
+}
+
+void
+attrd_ack_waitlist_clients(enum attrd_sync_point sync_point, const xmlNode *xml)
+{
+    int callid;
+    gpointer value;
+
+    if (waitlist == NULL) {
+        return;
+    }
+
+    if (crm_element_value_int(xml, XML_LRM_ATTR_CALLID, &callid) == -1) {
+        crm_warn("Could not get callid from request XML");
+        return;
+    }
+
+    value = pcmk__intkey_table_lookup(waitlist, callid);
+    if (value != NULL) {
+        struct waitlist_node *wl = (struct waitlist_node *) value;
+        pcmk__client_t *client = NULL;
+
+        if (wl->sync_point != sync_point) {
+            return;
+        }
+
+        crm_debug("Alerting client %s for reached %s sync point",
+                  wl->client_id, sync_point_str(wl->sync_point));
+
+        client = pcmk__find_client_by_id(wl->client_id);
+        if (client == NULL) {
+            return;
+        }
+
+        attrd_send_ack(client, wl->ipc_id, wl->flags | crm_ipc_client_response);
+
+        /* And then remove the client so it doesn't get alerted again.
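(Aside: the waitlist above is keyed by a monotonically increasing integer rather than by IPC client ID, and next_key() skips keys that are still in use after wraparound. A self-contained GLib illustration of that int-keyed pattern follows; it assumes pcmk__intkey_table() and friends are thin wrappers over a GHashTable with GINT_TO_POINTER keys, and the demo_ names are invented.)

/* Standalone GLib demo of the int-keyed waitlist pattern; assumes
 * pcmk__intkey_table()/pcmk__intkey_table_insert() wrap calls like these.
 */
#include <glib.h>
#include <stdio.h>

static int next_id = 0;

static int
demo_next_key(GHashTable *table)
{
    do {
        next_id++;
        if (next_id < 0) {      /* wrapped around, start over at 1 */
            next_id = 1;
        }
    } while (g_hash_table_contains(table, GINT_TO_POINTER(next_id)));
    return next_id;
}

int
main(void)
{
    GHashTable *table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                              NULL, g_free);
    int key = demo_next_key(table);

    g_hash_table_insert(table, GINT_TO_POINTER(key), g_strdup("client-abc"));
    printf("stored %s under key %d\n",
           (char *) g_hash_table_lookup(table, GINT_TO_POINTER(key)), key);
    g_hash_table_destroy(table);
    return 0;
}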
*/ + pcmk__intkey_table_remove(waitlist, callid); + } +} + const char * attrd_request_sync_point(xmlNode *xml) { if (xml_has_children(xml)) { xmlNode *child = pcmk__xe_match(xml, XML_ATTR_OP, PCMK__XA_ATTR_SYNC_POINT, NULL); if (child) { return crm_element_value(child, PCMK__XA_ATTR_SYNC_POINT); } else { return NULL; } } else { return crm_element_value(xml, PCMK__XA_ATTR_SYNC_POINT); } } bool attrd_request_has_sync_point(xmlNode *xml) { return attrd_request_sync_point(xml) != NULL; } diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h index ff850bbd4b..9dd8320200 100644 --- a/daemons/attrd/pacemaker-attrd.h +++ b/daemons/attrd/pacemaker-attrd.h @@ -1,188 +1,195 @@ /* * Copyright 2013-2022 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #ifndef PACEMAKER_ATTRD__H # define PACEMAKER_ATTRD__H #include #include #include #include #include #include #include /* * Legacy attrd (all pre-1.1.11 Pacemaker versions, plus all versions when used * with the no-longer-supported CMAN or corosync-plugin stacks) is unversioned. * * With atomic attrd, each attrd will send ATTRD_PROTOCOL_VERSION with every * peer request and reply. As of Pacemaker 2.0.0, at start-up each attrd will * also set a private attribute for itself with its version, so any attrd can * determine the minimum version supported by all peers. * * Protocol Pacemaker Significant changes * -------- --------- ------------------- * 1 1.1.11 PCMK__ATTRD_CMD_UPDATE (PCMK__XA_ATTR_NAME only), * PCMK__ATTRD_CMD_PEER_REMOVE, PCMK__ATTRD_CMD_REFRESH, * PCMK__ATTRD_CMD_FLUSH, PCMK__ATTRD_CMD_SYNC, * PCMK__ATTRD_CMD_SYNC_RESPONSE * 1 1.1.13 PCMK__ATTRD_CMD_UPDATE (with PCMK__XA_ATTR_PATTERN), * PCMK__ATTRD_CMD_QUERY * 1 1.1.15 PCMK__ATTRD_CMD_UPDATE_BOTH, * PCMK__ATTRD_CMD_UPDATE_DELAY * 2 1.1.17 PCMK__ATTRD_CMD_CLEAR_FAILURE * 3 2.1.1 PCMK__ATTRD_CMD_SYNC_RESPONSE indicates remote nodes * 4 2.2.0 Multiple attributes can be updated in a single IPC * message */ #define ATTRD_PROTOCOL_VERSION "4" #define attrd_send_ack(client, id, flags) \ pcmk__ipc_send_ack((client), (id), (flags), "ack", ATTRD_PROTOCOL_VERSION, CRM_EX_INDETERMINATE) void attrd_init_mainloop(void); void attrd_run_mainloop(void); void attrd_set_requesting_shutdown(void); void attrd_clear_requesting_shutdown(void); bool attrd_requesting_shutdown(void); bool attrd_shutting_down(void); void attrd_shutdown(int nsig); void attrd_init_ipc(void); void attrd_ipc_fini(void); void attrd_cib_disconnect(void); bool attrd_value_needs_expansion(const char *value); int attrd_expand_value(const char *value, const char *old_value); /* regular expression to clear failures of all resources */ #define ATTRD_RE_CLEAR_ALL \ "^(" PCMK__FAIL_COUNT_PREFIX "|" PCMK__LAST_FAILURE_PREFIX ")-" /* regular expression to clear failure of all operations for one resource * (format takes resource name) * * @COMPAT attributes set < 1.1.17: * also match older attributes that do not have the operation part */ #define ATTRD_RE_CLEAR_ONE ATTRD_RE_CLEAR_ALL "%s(#.+_[0-9]+)?$" /* regular expression to clear failure of one operation for one resource * (format takes resource name, operation name, and interval) * * @COMPAT attributes set < 1.1.17: * also match older attributes that do not have the operation part */ #define ATTRD_RE_CLEAR_OP ATTRD_RE_CLEAR_ALL "%s(#%s_%u)?$" int attrd_failure_regex(regex_t *regex, const char 
*rsc, const char *op, guint interval_ms); extern cib_t *the_cib; /* Alerts */ extern lrmd_t *the_lrmd; extern crm_trigger_t *attrd_config_read; void attrd_lrmd_disconnect(void); gboolean attrd_read_options(gpointer user_data); void attrd_cib_replaced_cb(const char *event, xmlNode * msg); void attrd_cib_updated_cb(const char *event, xmlNode *msg); int attrd_send_attribute_alert(const char *node, int nodeid, const char *attr, const char *value); // Elections void attrd_election_init(void); void attrd_election_fini(void); void attrd_start_election_if_needed(void); bool attrd_election_won(void); void attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml); bool attrd_check_for_new_writer(const crm_node_t *peer, const xmlNode *xml); void attrd_declare_winner(void); void attrd_remove_voter(const crm_node_t *peer); void attrd_xml_add_writer(xmlNode *xml); typedef struct attribute_s { char *uuid; /* TODO: Remove if at all possible */ char *id; char *set; GHashTable *values; int update; int timeout_ms; /* TODO: refactor these three as a bitmask */ bool changed; /* whether attribute value has changed since last write */ bool unknown_peer_uuids; /* whether we know we're missing a peer uuid */ gboolean is_private; /* whether to keep this attribute out of the CIB */ mainloop_timer_t *timer; char *user; gboolean force_write; /* Flag for updating attribute by ignoring delay */ } attribute_t; typedef struct attribute_value_s { uint32_t nodeid; gboolean is_remote; char *nodename; char *current; char *requested; gboolean seen; } attribute_value_t; extern crm_cluster_t *attrd_cluster; extern GHashTable *attributes; #define CIB_OP_TIMEOUT_S 120 int attrd_cluster_connect(void); void attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host, bool filter); void attrd_peer_sync(crm_node_t *peer, xmlNode *xml); void attrd_peer_remove(const char *host, bool uncache, const char *source); void attrd_peer_clear_failure(pcmk__request_t *request); void attrd_peer_sync_response(const crm_node_t *peer, bool peer_won, xmlNode *xml); void attrd_broadcast_protocol(void); xmlNode *attrd_client_peer_remove(pcmk__request_t *request); xmlNode *attrd_client_clear_failure(pcmk__request_t *request); xmlNode *attrd_client_update(pcmk__request_t *request); xmlNode *attrd_client_refresh(pcmk__request_t *request); xmlNode *attrd_client_query(pcmk__request_t *request); gboolean attrd_send_message(crm_node_t * node, xmlNode * data); xmlNode *attrd_add_value_xml(xmlNode *parent, const attribute_t *a, const attribute_value_t *v, bool force_write); void attrd_clear_value_seen(void); void attrd_free_attribute(gpointer data); void attrd_free_attribute_value(gpointer data); attribute_t *attrd_populate_attribute(xmlNode *xml, const char *attr); void attrd_write_attribute(attribute_t *a, bool ignore_delay); void attrd_write_attributes(bool all, bool ignore_delay); void attrd_write_or_elect_attribute(attribute_t *a); extern int minimum_protocol_version; void attrd_update_minimum_protocol_ver(const char *value); mainloop_timer_t *attrd_add_timer(const char *id, int timeout_ms, attribute_t *attr); void attrd_unregister_handlers(void); void attrd_handle_request(pcmk__request_t *request); +enum attrd_sync_point { + attrd_sync_point_local, + attrd_sync_point_cluster, +}; + +void attrd_add_client_to_waitlist(pcmk__request_t *request); +void attrd_ack_waitlist_clients(enum attrd_sync_point sync_point, const xmlNode *xml); const char *attrd_request_sync_point(xmlNode *xml); bool attrd_request_has_sync_point(xmlNode *xml); #endif /* 
PACEMAKER_ATTRD__H */
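An illustration for attrd_peer_clear_failure() in attrd_corosync.c above: it formats one of the ATTRD_RE_CLEAR_* patterns declared in this header and deletes every attribute whose name matches. A standalone sketch of that matching, assuming the conventional fail-count-<rsc>#<op>_<interval> attribute naming; the "fail-count"/"last-failure" literals are assumptions standing in for PCMK__FAIL_COUNT_PREFIX and PCMK__LAST_FAILURE_PREFIX:

/* Sketch of the matching used when clearing failures; the real code builds
 * the pattern via attrd_failure_regex() and the ATTRD_RE_CLEAR_* macros.
 * The prefix literals below are assumptions for the demo.
 */
#include <regex.h>
#include <stdio.h>

int
main(void)
{
    /* Roughly ATTRD_RE_CLEAR_OP with rsc="myrsc", op="monitor", interval=10000 */
    const char *pattern = "^(fail-count|last-failure)-myrsc(#monitor_10000)?$";
    const char *attrs[] = { "fail-count-myrsc#monitor_10000",
                            "last-failure-myrsc#monitor_10000",
                            "fail-count-other#start_0" };
    regex_t regex;

    if (regcomp(&regex, pattern, REG_EXTENDED|REG_NOSUB) != 0) {
        return 1;
    }
    for (int i = 0; i < 3; i++) {
        printf("%s: %s\n", attrs[i],
               (regexec(&regex, attrs[i], 0, NULL, 0) == 0)? "cleared" : "kept");
    }
    regfree(&regex);
    return 0;
}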
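And one more illustration for attrd_request_sync_point() in attrd_sync.c: it accepts both request shapes allowed by protocol 4, a single update carrying PCMK__XA_ATTR_SYNC_POINT directly and a multi-update carrying it on each XML_ATTR_OP child. A sketch of both shapes using the helpers from this patch; example_sync_point_shapes() is invented for illustration, and the element spelling depends on what XML_ATTR_OP expands to:

#include "pacemaker-attrd.h"    /* assumed to pull in the XML helpers and PCMK__* constants */

/* Sketch of the two message shapes the sync-point lookup handles */
static void
example_sync_point_shapes(void)
{
    /* Single update: sync point on the request element itself */
    xmlNode *single = create_xml_node(NULL, __func__);

    crm_xml_add(single, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE);
    crm_xml_add(single, PCMK__XA_ATTR_SYNC_POINT, PCMK__VALUE_CLUSTER);

    /* Multi-update: one child per attribute, sync point on each child */
    xmlNode *multi = create_xml_node(NULL, __func__);
    xmlNode *op = create_xml_node(multi, XML_ATTR_OP);

    crm_xml_add(op, PCMK__XA_ATTR_NAME, "example-attr");
    crm_xml_add(op, PCMK__XA_ATTR_SYNC_POINT, PCMK__VALUE_LOCAL);

    /* attrd_request_sync_point() returns the requested sync point either way */
    crm_trace("single=%s multi=%s",
              pcmk__s(attrd_request_sync_point(single), "none"),
              pcmk__s(attrd_request_sync_point(multi), "none"));

    free_xml(single);
    free_xml(multi);
}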