diff --git a/daemons/attrd/attrd_ipc.c b/daemons/attrd/attrd_ipc.c index 7b5aa30d2c..43e0f41824 100644 --- a/daemons/attrd/attrd_ipc.c +++ b/daemons/attrd/attrd_ipc.c @@ -1,657 +1,657 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include // PRIu32 #include #include #include #include #include #include #include #include #include #include #include "pacemaker-attrd.h" static qb_ipcs_service_t *ipcs = NULL; /*! * \internal * \brief Build the XML reply to a client query * * \param[in] attr Name of requested attribute * \param[in] host Name of requested host (or NULL for all hosts) * * \return New XML reply * \note Caller is responsible for freeing the resulting XML */ static xmlNode *build_query_reply(const char *attr, const char *host) { xmlNode *reply = pcmk__xe_create(NULL, __func__); attribute_t *a; crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_ATTRD); crm_xml_add(reply, PCMK__XA_SUBT, PCMK__ATTRD_CMD_QUERY); crm_xml_add(reply, PCMK__XA_ATTR_VERSION, ATTRD_PROTOCOL_VERSION); /* If desired attribute exists, add its value(s) to the reply */ a = g_hash_table_lookup(attributes, attr); if (a) { attribute_value_t *v; xmlNode *host_value; crm_xml_add(reply, PCMK__XA_ATTR_NAME, attr); /* Allow caller to use "localhost" to refer to local node */ if (pcmk__str_eq(host, "localhost", pcmk__str_casei)) { host = attrd_cluster->priv->node_name; crm_trace("Mapped localhost to %s", host); } /* If a specific node was requested, add its value */ if (host) { v = g_hash_table_lookup(a->values, host); host_value = pcmk__xe_create(reply, PCMK_XE_NODE); crm_xml_add(host_value, PCMK__XA_ATTR_HOST, host); crm_xml_add(host_value, PCMK__XA_ATTR_VALUE, (v? v->current : NULL)); /* Otherwise, add all nodes' values */ } else { GHashTableIter iter; g_hash_table_iter_init(&iter, a->values); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) { host_value = pcmk__xe_create(reply, PCMK_XE_NODE); crm_xml_add(host_value, PCMK__XA_ATTR_HOST, v->nodename); crm_xml_add(host_value, PCMK__XA_ATTR_VALUE, v->current); } } } return reply; } xmlNode * attrd_client_clear_failure(pcmk__request_t *request) { xmlNode *xml = request->xml; const char *rsc, *op, *interval_spec; if (minimum_protocol_version >= 2) { /* Propagate to all peers (including ourselves). * This ends up at attrd_peer_message(). 
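         * Peers speaking protocol version 2 or later understand the
         * clear-failure command natively, so the request can be broadcast
         * unchanged; the fallback below instead rewrites it as a
         * regex-based update for older peers.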
         */
        attrd_send_message(NULL, xml, false);
        pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
        return NULL;
    }

    rsc = crm_element_value(xml, PCMK__XA_ATTR_RESOURCE);
    op = crm_element_value(xml, PCMK__XA_ATTR_CLEAR_OPERATION);
    interval_spec = crm_element_value(xml, PCMK__XA_ATTR_CLEAR_INTERVAL);

    /* Map this to an update */
    crm_xml_add(xml, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE);

    /* Add regular expression matching desired attributes */
    if (rsc) {
        char *pattern;

        if (op == NULL) {
            pattern = crm_strdup_printf(ATTRD_RE_CLEAR_ONE, rsc);

        } else {
            guint interval_ms = 0U;

            pcmk_parse_interval_spec(interval_spec, &interval_ms);
            pattern = crm_strdup_printf(ATTRD_RE_CLEAR_OP, rsc, op,
                                        interval_ms);
        }

        crm_xml_add(xml, PCMK__XA_ATTR_REGEX, pattern);
        free(pattern);

    } else {
        crm_xml_add(xml, PCMK__XA_ATTR_REGEX, ATTRD_RE_CLEAR_ALL);
    }

    /* Make sure attribute and value are not set, so we delete via regex */
    pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_NAME);
    pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_VALUE);

    return attrd_client_update(request);
}

xmlNode *
attrd_client_peer_remove(pcmk__request_t *request)
{
    xmlNode *xml = request->xml;

    // Host and ID are not used in combination, rather host has precedence
    const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST);
    char *host_alloc = NULL;

    attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags);

    if (host == NULL) {
        int nodeid = 0;

        crm_element_value_int(xml, PCMK__XA_ATTR_HOST_ID, &nodeid);
        if (nodeid > 0) {
            pcmk__node_status_t *node = NULL;

            node = pcmk__search_node_caches(nodeid, NULL, NULL,
                                            pcmk__node_search_cluster_member);
            if ((node != NULL) && (node->name != NULL)) {
                // Use cached name if available
                host = node->name;
            } else {
                // Otherwise ask cluster layer
                host_alloc = pcmk__cluster_node_name(nodeid);
                host = host_alloc;
            }
            crm_xml_add(xml, PCMK__XA_ATTR_HOST, host);
        }
    }

    if (host) {
        crm_info("Client %s is requesting all values for %s be removed",
                 pcmk__client_name(request->ipc_client), host);
        attrd_send_message(NULL, xml, false); /* ends up at attrd_peer_message() */
        free(host_alloc);
    } else {
        crm_info("Ignoring request by client %s to remove all peer values without specifying peer",
                 pcmk__client_name(request->ipc_client));
    }

    pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
    return NULL;
}

xmlNode *
attrd_client_query(pcmk__request_t *request)
{
    xmlNode *query = request->xml;
    xmlNode *reply = NULL;
    const char *attr = NULL;

    crm_debug("Query arrived from %s", pcmk__client_name(request->ipc_client));

    /* Request must specify attribute name to query */
    attr = crm_element_value(query, PCMK__XA_ATTR_NAME);
    if (attr == NULL) {
        pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR,
                            "Ignoring malformed query from %s (no attribute name given)",
                            pcmk__client_name(request->ipc_client));
        return NULL;
    }

    /* Build the XML reply */
    reply = build_query_reply(attr,
                              crm_element_value(query, PCMK__XA_ATTR_HOST));
    if (reply == NULL) {
        pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR,
                            "Could not respond to query from %s: could not create XML reply",
                            pcmk__client_name(request->ipc_client));
        return NULL;
    } else {
        pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
    }

    request->ipc_client->request_id = 0;
    return reply;
}

xmlNode *
attrd_client_refresh(pcmk__request_t *request)
{
    crm_info("Updating all attributes");

    attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags);
    attrd_write_attributes(attrd_write_all|attrd_write_no_delay);
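    /* Judging by the flag names (an inference, not part of the original
     * code): attrd_write_all asks the writer to rewrite every attribute to
     * the CIB, and attrd_write_no_delay bypasses any configured dampening
     * interval so the write happens immediately.
     */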
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } static void handle_missing_host(xmlNode *xml) { if (crm_element_value(xml, PCMK__XA_ATTR_HOST) == NULL) { crm_trace("Inferring local node %s with XML ID %s", attrd_cluster->priv->node_name, attrd_cluster->priv->node_xml_id); crm_xml_add(xml, PCMK__XA_ATTR_HOST, attrd_cluster->priv->node_name); crm_xml_add(xml, PCMK__XA_ATTR_HOST_ID, attrd_cluster->priv->node_xml_id); } } /* Convert a single IPC message with a regex into one with multiple children, one * for each regex match. */ static int expand_regexes(xmlNode *xml, const char *attr, const char *value, const char *regex) { if (attr == NULL && regex) { bool matched = false; GHashTableIter aIter; regex_t r_patt; crm_debug("Setting %s to %s", regex, value); if (regcomp(&r_patt, regex, REG_EXTENDED|REG_NOSUB)) { return EINVAL; } g_hash_table_iter_init(&aIter, attributes); while (g_hash_table_iter_next(&aIter, (gpointer *) & attr, NULL)) { int status = regexec(&r_patt, attr, 0, NULL, 0); if (status == 0) { xmlNode *child = pcmk__xe_create(xml, PCMK_XE_OP); crm_trace("Matched %s with %s", attr, regex); matched = true; /* Copy all the non-conflicting attributes from the parent over, * but remove the regex and replace it with the name. */ pcmk__xe_copy_attrs(child, xml, pcmk__xaf_no_overwrite); pcmk__xe_remove_attr(child, PCMK__XA_ATTR_REGEX); crm_xml_add(child, PCMK__XA_ATTR_NAME, attr); } } regfree(&r_patt); /* Return a code if we never matched anything. This should not be treated * as an error. It indicates there was a regex, and it was a valid regex, * but simply did not match anything and the caller should not continue * doing any regex-related processing. */ if (!matched) { return pcmk_rc_op_unsatisfied; } } else if (attr == NULL) { return pcmk_rc_bad_nvpair; } return pcmk_rc_ok; } static int handle_regexes(pcmk__request_t *request) { xmlNode *xml = request->xml; int rc = pcmk_rc_ok; const char *attr = crm_element_value(xml, PCMK__XA_ATTR_NAME); const char *value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); const char *regex = crm_element_value(xml, PCMK__XA_ATTR_REGEX); rc = expand_regexes(xml, attr, value, regex); if (rc == EINVAL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Bad regex '%s' for update from client %s", regex, pcmk__client_name(request->ipc_client)); } else if (rc == pcmk_rc_bad_nvpair) { crm_err("Update request did not specify attribute or regular expression"); pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Client %s update request did not specify attribute or regular expression", pcmk__client_name(request->ipc_client)); } return rc; } static int handle_value_expansion(const char **value, xmlNode *xml, const char *op, const char *attr) { attribute_t *a = g_hash_table_lookup(attributes, attr); if (a == NULL && pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE_DELAY, pcmk__str_none)) { return EINVAL; } if (*value && attrd_value_needs_expansion(*value)) { int int_value; attribute_value_t *v = NULL; if (a) { const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST); v = g_hash_table_lookup(a->values, host); } int_value = attrd_expand_value(*value, (v? 
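                                       /* Values such as "++" or "+=<delta>"
                                        * are expanded against the attribute's
                                        * current value on this node (hence
                                        * the lookup above); a NULL current
                                        * value presumably expands from 0. */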
v->current : NULL)); crm_info("Expanded %s=%s to %d", attr, *value, int_value); crm_xml_add_int(xml, PCMK__XA_ATTR_VALUE, int_value); /* Replacing the value frees the previous memory, so re-query it */ *value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); } return pcmk_rc_ok; } static void send_update_msg_to_cluster(pcmk__request_t *request, xmlNode *xml) { if (pcmk__str_eq(attrd_request_sync_point(xml), PCMK__VALUE_CLUSTER, pcmk__str_none)) { /* The client is waiting on the cluster-wide sync point. In this case, * the response ACK is not sent until this attrd broadcasts the update * and receives its own confirmation back from all peers. */ attrd_expect_confirmations(request, attrd_cluster_sync_point_update); attrd_send_message(NULL, xml, true); /* ends up at attrd_peer_message() */ } else { /* The client is either waiting on the local sync point or was not * waiting on any sync point at all. For the local sync point, the * response ACK is sent in attrd_peer_update. For clients not * waiting on any sync point, the response ACK is sent in * handle_update_request immediately before this function was called. */ attrd_send_message(NULL, xml, false); /* ends up at attrd_peer_message() */ } } static int send_child_update(xmlNode *child, void *data) { pcmk__request_t *request = (pcmk__request_t *) data; /* Calling pcmk__set_result is handled by one of these calls to * attrd_client_update, so no need to do it again here. */ request->xml = child; attrd_client_update(request); return pcmk_rc_ok; } xmlNode * attrd_client_update(pcmk__request_t *request) { xmlNode *xml = NULL; const char *attr, *value, *regex; CRM_CHECK((request != NULL) && (request->xml != NULL), return NULL); xml = request->xml; /* If the message has children, that means it is a message from a newer * client that supports sending multiple operations at a time. There are * two ways we can handle that. */ if (xml->children != NULL) { if (ATTRD_SUPPORTS_MULTI_MESSAGE(minimum_protocol_version)) { /* First, if all peers support a certain protocol version, we can * just broadcast the big message and they'll handle it. However, * we also need to apply all the transformations in this function * to the children since they don't happen anywhere else. */ for (xmlNode *child = pcmk__xe_first_child(xml, PCMK_XE_OP, NULL, NULL); child != NULL; child = pcmk__xe_next(child, PCMK_XE_OP)) { attr = crm_element_value(child, PCMK__XA_ATTR_NAME); value = crm_element_value(child, PCMK__XA_ATTR_VALUE); handle_missing_host(child); if (handle_value_expansion(&value, child, request->op, attr) == EINVAL) { pcmk__format_result(&request->result, CRM_EX_NOSUCH, PCMK_EXEC_ERROR, "Attribute %s does not exist", attr); return NULL; } } send_update_msg_to_cluster(request, xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { /* Save the original xml node pointer so it can be restored after iterating * over all the children. */ xmlNode *orig_xml = request->xml; /* Second, if they do not support that protocol version, split it * up into individual messages and call attrd_client_update on * each one. */ pcmk__xe_foreach_child(xml, PCMK_XE_OP, send_child_update, request); request->xml = orig_xml; } return NULL; } attr = crm_element_value(xml, PCMK__XA_ATTR_NAME); value = crm_element_value(xml, PCMK__XA_ATTR_VALUE); regex = crm_element_value(xml, PCMK__XA_ATTR_REGEX); if (handle_regexes(request) != pcmk_rc_ok) { /* Error handling was already dealt with in handle_regexes, so just return. 
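     * For a bad regex or a missing attribute/regex, handle_regexes() has
     * already populated request->result; a valid regex that matched nothing
     * (pcmk_rc_op_unsatisfied) simply means there is nothing to update.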
*/ return NULL; } else if (regex) { /* Recursively call attrd_client_update on the new message with regexes * expanded. If supported by the attribute daemon, this means that all * matches can also be handled atomically. */ return attrd_client_update(request); } handle_missing_host(xml); if (handle_value_expansion(&value, xml, request->op, attr) == EINVAL) { pcmk__format_result(&request->result, CRM_EX_NOSUCH, PCMK_EXEC_ERROR, "Attribute %s does not exist", attr); return NULL; } crm_debug("Broadcasting %s[%s]=%s%s", attr, crm_element_value(xml, PCMK__XA_ATTR_HOST), value, (attrd_election_won()? " (writer)" : "")); send_update_msg_to_cluster(request, xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } /*! * \internal * \brief Accept a new client IPC connection * * \param[in,out] c New connection * \param[in] uid Client user id * \param[in] gid Client group id * * \return pcmk_ok on success, -errno otherwise */ static int32_t attrd_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("New client connection %p", c); if (attrd_shutting_down(false)) { crm_info("Ignoring new connection from pid %d during shutdown", pcmk__client_pid(c)); return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return pcmk_ok; } /*! * \internal * \brief Destroy a client IPC connection * * \param[in] c Connection to destroy * * \return FALSE (i.e. do not re-run this callback) */ static int32_t attrd_ipc_closed(qb_ipcs_connection_t *c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { crm_trace("Ignoring request to clean up unknown connection %p", c); } else { crm_trace("Cleaning up closed client connection %p", c); /* Remove the client from the sync point waitlist if it's present. */ attrd_remove_client_from_waitlist(client); /* And no longer wait for confirmations from any peers. */ attrd_do_not_wait_for_client(client); pcmk__free_client(client); } return FALSE; } /*! * \internal * \brief Destroy a client IPC connection * * \param[in,out] c Connection to destroy * * \note We handle a destroyed connection the same as a closed one, * but we need a separate handler because the return type is different. */ static void attrd_ipc_destroy(qb_ipcs_connection_t *c) { crm_trace("Destroying client connection %p", c); attrd_ipc_closed(c); } static int32_t attrd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) { int rc = pcmk_rc_ok; uint32_t id = 0; uint32_t flags = 0; pcmk__client_t *client = pcmk__find_client(c); xmlNode *xml = NULL; // Sanity-check, and parse XML from IPC data CRM_CHECK((c != NULL) && (client != NULL), return 0); if (data == NULL) { crm_debug("No IPC data from PID %d", pcmk__client_pid(c)); return 0; } rc = pcmk__ipc_msg_append(&client->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. */ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. */ - xml = pcmk__client_data2xml(client, client->buffer->data, &id, &flags); + xml = pcmk__client_data2xml(client, &id, &flags); g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return. 
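     * (Note on the reassembly flow, added for clarity: pcmk__ipc_msg_append()
     * accumulates message fragments in client->buffer, returning
     * pcmk_rc_ipc_more until the final fragment arrives. With this patch,
     * pcmk__client_data2xml() presumably reads the assembled message from
     * client->buffer itself, which is why the explicit data argument was
     * dropped from the call above.)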
*/ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (client->buffer != NULL) { g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } return 0; } if (xml == NULL) { crm_debug("Unrecognizable IPC data from PID %d", pcmk__client_pid(c)); pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } else { pcmk__request_t request = { .ipc_client = client, .ipc_id = id, .ipc_flags = flags, .peer = NULL, .xml = xml, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; pcmk__assert(client->user != NULL); pcmk__update_acl_user(xml, PCMK__XA_ATTR_USER, client->user); request.op = crm_element_value_copy(request.xml, PCMK_XA_TASK); CRM_CHECK(request.op != NULL, return 0); attrd_handle_request(&request); pcmk__reset_request(&request); } pcmk__xml_free(xml); return 0; } static struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = attrd_ipc_accept, .connection_created = NULL, .msg_process = attrd_ipc_dispatch, .connection_closed = attrd_ipc_closed, .connection_destroyed = attrd_ipc_destroy }; void attrd_ipc_fini(void) { if (ipcs != NULL) { pcmk__drop_all_clients(ipcs); qb_ipcs_destroy(ipcs); ipcs = NULL; } attrd_unregister_handlers(); pcmk__client_cleanup(); } /*! * \internal * \brief Set up attrd IPC communication */ void attrd_init_ipc(void) { pcmk__serve_attrd_ipc(&ipcs, &ipc_callbacks); } diff --git a/daemons/based/based_callbacks.c b/daemons/based/based_callbacks.c index 0d2383eec4..13646eb4be 100644 --- a/daemons/based/based_callbacks.c +++ b/daemons/based/based_callbacks.c @@ -1,1433 +1,1432 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include // uint32_t, uint64_t, UINT64_C() #include #include #include // PRIu64 #include #include #include // xmlXPathObject, etc. #include #include #include #include #include #include #define EXIT_ESCALATION_MS 10000 qb_ipcs_service_t *ipcs_ro = NULL; qb_ipcs_service_t *ipcs_rw = NULL; qb_ipcs_service_t *ipcs_shm = NULL; static int cib_process_command(xmlNode *request, const cib__operation_t *operation, cib__op_fn_t op_function, xmlNode **reply, xmlNode **cib_diff, bool privileged); static gboolean cib_common_callback(qb_ipcs_connection_t *c, void *data, size_t size, gboolean privileged); static int32_t cib_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { if (cib_shutdown_flag) { crm_info("Ignoring new IPC client [%d] during shutdown", pcmk__client_pid(c)); return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } static int32_t cib_ipc_dispatch_rw(qb_ipcs_connection_t * c, void *data, size_t size) { pcmk__client_t *client = pcmk__find_client(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, TRUE); } static int32_t cib_ipc_dispatch_ro(qb_ipcs_connection_t * c, void *data, size_t size) { pcmk__client_t *client = pcmk__find_client(c); crm_trace("%p message from %s", c, client->id); return cib_common_callback(c, data, size, FALSE); } /* Error code means? 
*/ static int32_t cib_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); pcmk__free_client(client); return 0; } static void cib_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); cib_ipc_closed(c); if (cib_shutdown_flag) { cib_shutdown(0); } } struct qb_ipcs_service_handlers ipc_ro_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = NULL, .msg_process = cib_ipc_dispatch_ro, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; struct qb_ipcs_service_handlers ipc_rw_callbacks = { .connection_accept = cib_ipc_accept, .connection_created = NULL, .msg_process = cib_ipc_dispatch_rw, .connection_closed = cib_ipc_closed, .connection_destroyed = cib_ipc_destroy }; /*! * \internal * \brief Create reply XML for a CIB request * * \param[in] op CIB operation type * \param[in] call_id CIB call ID * \param[in] client_id CIB client ID * \param[in] call_options Group of enum cib_call_options flags * \param[in] rc Request return code * \param[in] call_data Request output data * * \return Reply XML (guaranteed not to be \c NULL) * * \note The caller is responsible for freeing the return value using * \p pcmk__xml_free(). */ static xmlNode * create_cib_reply(const char *op, const char *call_id, const char *client_id, uint32_t call_options, int rc, xmlNode *call_data) { xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_CIB_REPLY); crm_xml_add(reply, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(reply, PCMK__XA_CIB_OP, op); crm_xml_add(reply, PCMK__XA_CIB_CALLID, call_id); crm_xml_add(reply, PCMK__XA_CIB_CLIENTID, client_id); crm_xml_add_int(reply, PCMK__XA_CIB_CALLOPT, call_options); crm_xml_add_int(reply, PCMK__XA_CIB_RC, rc); if (call_data != NULL) { xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_CIB_CALLDATA); crm_trace("Attaching reply output"); pcmk__xml_copy(wrapper, call_data); } crm_log_xml_explicit(reply, "cib:reply"); return reply; } static void do_local_notify(const xmlNode *notify_src, const char *client_id, bool sync_reply, bool from_peer) { int msg_id = 0; int rc = pcmk_rc_ok; pcmk__client_t *client_obj = NULL; uint32_t flags = crm_ipc_server_event; CRM_CHECK((notify_src != NULL) && (client_id != NULL), return); crm_element_value_int(notify_src, PCMK__XA_CIB_CALLID, &msg_id); client_obj = pcmk__find_client_by_id(client_id); if (client_obj == NULL) { crm_debug("Could not notify client %s%s %s of call %d result: " "client no longer exists", client_id, (from_peer? " (originator of delegated request)" : ""), (sync_reply? "synchronously" : "asynchronously"), msg_id); return; } if (sync_reply) { flags = crm_ipc_flags_none; if (client_obj->ipcs != NULL) { msg_id = client_obj->request_id; client_obj->request_id = 0; } } switch (PCMK__CLIENT_TYPE(client_obj)) { case pcmk__client_ipc: rc = pcmk__ipc_send_xml(client_obj, msg_id, notify_src, flags); break; case pcmk__client_tls: case pcmk__client_tcp: rc = pcmk__remote_send_xml(client_obj->remote, notify_src); break; default: rc = EPROTONOSUPPORT; break; } if (rc == pcmk_rc_ok) { crm_trace("Notified %s client %s%s %s of call %d result", pcmk__client_type_str(PCMK__CLIENT_TYPE(client_obj)), pcmk__client_name(client_obj), (from_peer? " (originator of delegated request)" : ""), (sync_reply? 
"synchronously" : "asynchronously"), msg_id); } else { crm_warn("Could not notify %s client %s%s %s of call %d result: %s", pcmk__client_type_str(PCMK__CLIENT_TYPE(client_obj)), pcmk__client_name(client_obj), (from_peer? " (originator of delegated request)" : ""), (sync_reply? "synchronously" : "asynchronously"), msg_id, pcmk_rc_str(rc)); } } void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request, pcmk__client_t *cib_client, gboolean privileged) { const char *op = crm_element_value(op_request, PCMK__XA_CIB_OP); uint32_t call_options = cib_none; int rc = pcmk_rc_ok; rc = pcmk__xe_get_flags(op_request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } /* Requests with cib_transaction set should not be sent to based directly * (outside of a commit-transaction request) */ if (pcmk_is_set(call_options, cib_transaction)) { return; } if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) { if (flags & crm_ipc_client_response) { xmlNode *ack = pcmk__xe_create(NULL, __func__); crm_xml_add(ack, PCMK__XA_CIB_OP, CRM_OP_REGISTER); crm_xml_add(ack, PCMK__XA_CIB_CLIENTID, cib_client->id); pcmk__ipc_send_xml(cib_client, id, ack, flags); cib_client->request_id = 0; pcmk__xml_free(ack); } return; } else if (pcmk__str_eq(op, PCMK__VALUE_CIB_NOTIFY, pcmk__str_none)) { /* Update the notify filters for this client */ int on_off = 0; crm_exit_t status = CRM_EX_OK; uint64_t bit = UINT64_C(0); const char *type = crm_element_value(op_request, PCMK__XA_CIB_NOTIFY_TYPE); crm_element_value_int(op_request, PCMK__XA_CIB_NOTIFY_ACTIVATE, &on_off); crm_debug("Setting %s callbacks %s for client %s", type, (on_off? "on" : "off"), pcmk__client_name(cib_client)); if (pcmk__str_eq(type, PCMK__VALUE_CIB_POST_NOTIFY, pcmk__str_none)) { bit = cib_notify_post; } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_PRE_NOTIFY, pcmk__str_none)) { bit = cib_notify_pre; } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_UPDATE_CONFIRMATION, pcmk__str_none)) { bit = cib_notify_confirm; } else if (pcmk__str_eq(type, PCMK__VALUE_CIB_DIFF_NOTIFY, pcmk__str_none)) { bit = cib_notify_diff; } else { status = CRM_EX_INVALID_PARAM; } if (bit != 0) { if (on_off) { pcmk__set_client_flags(cib_client, bit); } else { pcmk__clear_client_flags(cib_client, bit); } } pcmk__ipc_send_ack(cib_client, id, flags, PCMK__XE_ACK, NULL, status); return; } cib_process_request(op_request, privileged, cib_client); } int32_t cib_common_callback(qb_ipcs_connection_t * c, void *data, size_t size, gboolean privileged) { int rc = pcmk_rc_ok; uint32_t id = 0; uint32_t flags = 0; uint32_t call_options = cib_none; pcmk__client_t *cib_client = pcmk__find_client(c); xmlNode *op_request = NULL; if (cib_client == NULL) { crm_trace("Invalid client %p", c); return 0; } rc = pcmk__ipc_msg_append(&cib_client->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. */ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. */ - op_request = pcmk__client_data2xml(cib_client, cib_client->buffer->data, - &id, &flags); + op_request = pcmk__client_data2xml(cib_client, &id, &flags); g_byte_array_free(cib_client->buffer, TRUE); cib_client->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return. 
*/ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (cib_client->buffer != NULL) { g_byte_array_free(cib_client->buffer, TRUE); cib_client->buffer = NULL; } return 0; } if (op_request) { int rc = pcmk_rc_ok; rc = pcmk__xe_get_flags(op_request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } } if (op_request == NULL) { crm_trace("Invalid message from %p", c); pcmk__ipc_send_ack(cib_client, id, flags, PCMK__XE_NACK, NULL, CRM_EX_PROTOCOL); return 0; } if (pcmk_is_set(call_options, cib_sync_call)) { CRM_LOG_ASSERT(flags & crm_ipc_client_response); CRM_LOG_ASSERT(cib_client->request_id == 0); /* This means the client has two synchronous events in-flight */ cib_client->request_id = id; /* Reply only to the last one */ } if (cib_client->name == NULL) { const char *value = crm_element_value(op_request, PCMK__XA_CIB_CLIENTNAME); if (value == NULL) { cib_client->name = pcmk__itoa(cib_client->pid); } else { cib_client->name = pcmk__str_copy(value); if (pcmk__parse_server(value) != pcmk_ipc_unknown) { pcmk__set_client_flags(cib_client, cib_is_daemon); } } } /* Allow cluster daemons more leeway before being evicted */ if (pcmk_is_set(cib_client->flags, cib_is_daemon)) { const char *qmax = cib_config_lookup(PCMK_OPT_CLUSTER_IPC_LIMIT); pcmk__set_client_queue_max(cib_client, qmax); } crm_xml_add(op_request, PCMK__XA_CIB_CLIENTID, cib_client->id); crm_xml_add(op_request, PCMK__XA_CIB_CLIENTNAME, cib_client->name); CRM_LOG_ASSERT(cib_client->user != NULL); pcmk__update_acl_user(op_request, PCMK__XA_CIB_USER, cib_client->user); cib_common_callback_worker(id, flags, op_request, cib_client, privileged); pcmk__xml_free(op_request); return 0; } static uint64_t ping_seq = 0; static char *ping_digest = NULL; static bool ping_modified_since = FALSE; static gboolean cib_digester_cb(gpointer data) { if (based_is_primary) { char buffer[32]; xmlNode *ping = pcmk__xe_create(NULL, PCMK__XE_PING); ping_seq++; free(ping_digest); ping_digest = NULL; ping_modified_since = FALSE; snprintf(buffer, 32, "%" PRIu64, ping_seq); crm_trace("Requesting peer digests (%s)", buffer); crm_xml_add(ping, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(ping, PCMK__XA_CIB_OP, CRM_OP_PING); crm_xml_add(ping, PCMK__XA_CIB_PING_ID, buffer); crm_xml_add(ping, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); pcmk__cluster_send_message(NULL, pcmk_ipc_based, ping); pcmk__xml_free(ping); } return FALSE; } static void process_ping_reply(xmlNode *reply) { uint64_t seq = 0; const char *host = crm_element_value(reply, PCMK__XA_SRC); xmlNode *wrapper = pcmk__xe_first_child(reply, PCMK__XE_CIB_CALLDATA, NULL, NULL); xmlNode *pong = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); const char *seq_s = crm_element_value(pong, PCMK__XA_CIB_PING_ID); const char *digest = crm_element_value(pong, PCMK__XA_DIGEST); if (seq_s == NULL) { crm_debug("Ignoring ping reply with no " PCMK__XA_CIB_PING_ID); return; } else { long long seq_ll; int rc = pcmk__scan_ll(seq_s, &seq_ll, 0LL); if (rc != pcmk_rc_ok) { crm_debug("Ignoring ping reply with invalid " PCMK__XA_CIB_PING_ID " '%s': %s", seq_s, pcmk_rc_str(rc)); return; } seq = (uint64_t) seq_ll; } if(digest == NULL) { crm_trace("Ignoring ping reply %s from %s with no digest", seq_s, host); } else if(seq != ping_seq) { crm_trace("Ignoring out of sequence ping reply %s from %s", seq_s, host); } else if(ping_modified_since) { crm_trace("Ignoring ping reply %s from %s: cib updated since", seq_s, host); } else { 
if(ping_digest == NULL) { crm_trace("Calculating new digest"); ping_digest = pcmk__digest_xml(the_cib, true); } crm_trace("Processing ping reply %s from %s (%s)", seq_s, host, digest); if (!pcmk__str_eq(ping_digest, digest, pcmk__str_casei)) { xmlNode *wrapper = pcmk__xe_first_child(pong, PCMK__XE_CIB_CALLDATA, NULL, NULL); xmlNode *remote_cib = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); const char *admin_epoch_s = NULL; const char *epoch_s = NULL; const char *num_updates_s = NULL; if (remote_cib != NULL) { admin_epoch_s = crm_element_value(remote_cib, PCMK_XA_ADMIN_EPOCH); epoch_s = crm_element_value(remote_cib, PCMK_XA_EPOCH); num_updates_s = crm_element_value(remote_cib, PCMK_XA_NUM_UPDATES); } crm_notice("Local CIB %s.%s.%s.%s differs from %s: %s.%s.%s.%s %p", crm_element_value(the_cib, PCMK_XA_ADMIN_EPOCH), crm_element_value(the_cib, PCMK_XA_EPOCH), crm_element_value(the_cib, PCMK_XA_NUM_UPDATES), ping_digest, host, pcmk__s(admin_epoch_s, "_"), pcmk__s(epoch_s, "_"), pcmk__s(num_updates_s, "_"), digest, remote_cib); if(remote_cib && remote_cib->children) { // Additional debug pcmk__xml_mark_changes(the_cib, remote_cib); pcmk__log_xml_changes(LOG_INFO, remote_cib); crm_trace("End of differences"); } pcmk__xml_free(remote_cib); sync_our_cib(reply, FALSE); } } } static void parse_local_options(const pcmk__client_t *cib_client, const cib__operation_t *operation, const char *host, const char *op, gboolean *local_notify, gboolean *needs_reply, gboolean *process, gboolean *needs_forward) { // Process locally and notify local client *process = TRUE; *needs_reply = FALSE; *local_notify = TRUE; *needs_forward = FALSE; if (pcmk_is_set(operation->flags, cib__op_attr_local)) { /* Always process locally if cib__op_attr_local is set. * * @COMPAT: Currently host is ignored. At a compatibility break, throw * an error (from cib_process_request() or earlier) if host is not NULL or * OUR_NODENAME. */ crm_trace("Processing always-local %s op from client %s", op, pcmk__client_name(cib_client)); if (!pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei|pcmk__str_null_matches)) { crm_warn("Operation '%s' is always local but its target host is " "set to '%s'", op, host); } return; } if (pcmk_is_set(operation->flags, cib__op_attr_modifies) || !pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei|pcmk__str_null_matches)) { // Forward modifying and non-local requests via cluster *process = FALSE; *needs_reply = FALSE; *local_notify = FALSE; *needs_forward = TRUE; crm_trace("%s op from %s needs to be forwarded to %s", op, pcmk__client_name(cib_client), pcmk__s(host, "all nodes")); return; } if (stand_alone) { crm_trace("Processing %s op from client %s (stand-alone)", op, pcmk__client_name(cib_client)); } else { crm_trace("Processing %saddressed %s op from client %s", ((host != NULL)? "locally " : "un"), op, pcmk__client_name(cib_client)); } } static gboolean parse_peer_options(const cib__operation_t *operation, xmlNode *request, gboolean *local_notify, gboolean *needs_reply, gboolean *process) { /* TODO: What happens when an update comes in after node A * requests the CIB from node B, but before it gets the reply (and * sends out the replace operation)? * * (This may no longer be relevant since legacy mode was dropped; need to * trace code more closely to check.) 
*/ const char *host = NULL; const char *delegated = crm_element_value(request, PCMK__XA_CIB_DELEGATED_FROM); const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *originator = crm_element_value(request, PCMK__XA_SRC); const char *reply_to = crm_element_value(request, PCMK__XA_CIB_ISREPLYTO); gboolean is_reply = pcmk__str_eq(reply_to, OUR_NODENAME, pcmk__str_casei); if (originator == NULL) { // Shouldn't be possible originator = "peer"; } if (pcmk__str_eq(op, PCMK__CIB_REQUEST_REPLACE, pcmk__str_none)) { // sync_our_cib() sets PCMK__XA_CIB_ISREPLYTO if (reply_to) { delegated = reply_to; } goto skip_is_reply; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SYNC_TO_ALL, pcmk__str_none)) { // Nothing to do } else if (is_reply && pcmk__str_eq(op, CRM_OP_PING, pcmk__str_casei)) { process_ping_reply(request); return FALSE; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_UPGRADE, pcmk__str_none)) { /* Only the DC (node with the oldest software) should process * this operation if PCMK__XA_CIB_SCHEMA_MAX is unset. * * If the DC is happy it will then send out another * PCMK__CIB_REQUEST_UPGRADE which will tell all nodes to do the actual * upgrade. * * Except this time PCMK__XA_CIB_SCHEMA_MAX will be set which puts a * limit on how far newer nodes will go */ const char *max = crm_element_value(request, PCMK__XA_CIB_SCHEMA_MAX); const char *upgrade_rc = crm_element_value(request, PCMK__XA_CIB_UPGRADE_RC); crm_trace("Parsing upgrade %s for %s with max=%s and upgrade_rc=%s", (is_reply? "reply" : "request"), (based_is_primary? "primary" : "secondary"), pcmk__s(max, "none"), pcmk__s(upgrade_rc, "none")); if (upgrade_rc != NULL) { // Our upgrade request was rejected by DC, notify clients of result crm_xml_add(request, PCMK__XA_CIB_RC, upgrade_rc); } else if ((max == NULL) && based_is_primary) { /* We are the DC, check if this upgrade is allowed */ goto skip_is_reply; } else if(max) { /* Ok, go ahead and upgrade to 'max' */ goto skip_is_reply; } else { // Ignore broadcast client requests when we're not primary return FALSE; } } else if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { crm_info("Detected legacy %s global update from %s", op, originator); send_sync_request(NULL); return FALSE; } else if (is_reply && pcmk_is_set(operation->flags, cib__op_attr_modifies)) { crm_trace("Ignoring legacy %s reply sent from %s to local clients", op, originator); return FALSE; } else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SHUTDOWN, pcmk__str_none)) { *local_notify = FALSE; if (reply_to == NULL) { *process = TRUE; } else { // Not possible? 
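                /* A shutdown request is broadcast rather than addressed as a
                 * reply, so reply_to should never be set here; this branch is
                 * presumably just defensive. */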
crm_debug("Ignoring shutdown request from %s because reply_to=%s", originator, reply_to); } return *process; } if (is_reply) { crm_trace("Will notify local clients for %s reply from %s", op, originator); *process = FALSE; *needs_reply = FALSE; *local_notify = TRUE; return TRUE; } skip_is_reply: *process = TRUE; *needs_reply = FALSE; *local_notify = pcmk__str_eq(delegated, OUR_NODENAME, pcmk__str_casei); host = crm_element_value(request, PCMK__XA_CIB_HOST); if (pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei)) { crm_trace("Processing %s request sent to us from %s", op, originator); *needs_reply = TRUE; return TRUE; } else if (host != NULL) { crm_trace("Ignoring %s request intended for CIB manager on %s", op, host); return FALSE; } else if(is_reply == FALSE && pcmk__str_eq(op, CRM_OP_PING, pcmk__str_casei)) { *needs_reply = TRUE; } crm_trace("Processing %s request broadcast by %s call %s on %s " "(local clients will%s be notified)", op, pcmk__s(crm_element_value(request, PCMK__XA_CIB_CLIENTNAME), "client"), pcmk__s(crm_element_value(request, PCMK__XA_CIB_CALLID), "without ID"), originator, (*local_notify? "" : "not")); return TRUE; } /*! * \internal * \brief Forward a CIB request to the appropriate target host(s) * * \param[in] request CIB request to forward */ static void forward_request(xmlNode *request) { const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *section = crm_element_value(request, PCMK__XA_CIB_SECTION); const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); const char *originator = crm_element_value(request, PCMK__XA_SRC); const char *client_name = crm_element_value(request, PCMK__XA_CIB_CLIENTNAME); const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); pcmk__node_status_t *peer = NULL; int log_level = LOG_INFO; if (pcmk__str_eq(op, PCMK__CIB_REQUEST_NOOP, pcmk__str_none)) { log_level = LOG_DEBUG; } do_crm_log(log_level, "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)", pcmk__s(op, "invalid"), pcmk__s(section, "all"), pcmk__s(host, "all"), pcmk__s(originator, "local"), pcmk__s(client_name, "unspecified"), pcmk__s(call_id, "unspecified")); crm_xml_add(request, PCMK__XA_CIB_DELEGATED_FROM, OUR_NODENAME); if (host != NULL) { peer = pcmk__get_node(0, host, NULL, pcmk__node_search_cluster_member); } pcmk__cluster_send_message(peer, pcmk_ipc_based, request); // Return the request to its original state pcmk__xe_remove_attr(request, PCMK__XA_CIB_DELEGATED_FROM); } static void send_peer_reply(xmlNode *msg, const char *originator) { const pcmk__node_status_t *node = NULL; if ((msg == NULL) || (originator == NULL)) { return; } // Send reply via cluster to originating node node = pcmk__get_node(0, originator, NULL, pcmk__node_search_cluster_member); crm_trace("Sending request result to %s only", originator); crm_xml_add(msg, PCMK__XA_CIB_ISREPLYTO, originator); pcmk__cluster_send_message(node, pcmk_ipc_based, msg); } /*! 
* \internal * \brief Handle an IPC or CPG message containing a request * * \param[in,out] request Request XML * \param[in] privileged Whether privileged commands may be run * (see cib_server_ops[] definition) * \param[in] cib_client IPC client that sent request (or NULL if CPG) * * \return Legacy Pacemaker return code */ int cib_process_request(xmlNode *request, gboolean privileged, const pcmk__client_t *cib_client) { // @TODO: Break into multiple smaller functions uint32_t call_options = cib_none; gboolean process = TRUE; // Whether to process request locally now gboolean is_update = TRUE; // Whether request would modify CIB gboolean needs_reply = TRUE; // Whether to build a reply gboolean local_notify = FALSE; // Whether to notify (local) requester gboolean needs_forward = FALSE; // Whether to forward request somewhere else xmlNode *op_reply = NULL; xmlNode *result_diff = NULL; int rc = pcmk_ok; const char *op = crm_element_value(request, PCMK__XA_CIB_OP); const char *originator = crm_element_value(request, PCMK__XA_SRC); const char *host = crm_element_value(request, PCMK__XA_CIB_HOST); const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); const char *client_id = crm_element_value(request, PCMK__XA_CIB_CLIENTID); const char *client_name = crm_element_value(request, PCMK__XA_CIB_CLIENTNAME); const char *reply_to = crm_element_value(request, PCMK__XA_CIB_ISREPLYTO); const cib__operation_t *operation = NULL; cib__op_fn_t op_function = NULL; rc = pcmk__xe_get_flags(request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } if ((host != NULL) && (*host == '\0')) { host = NULL; } if (cib_client == NULL) { crm_trace("Processing peer %s operation from %s/%s on %s intended for %s (reply=%s)", op, pcmk__s(client_name, "client"), call_id, originator, pcmk__s(host, "all"), reply_to); } else { crm_xml_add(request, PCMK__XA_SRC, OUR_NODENAME); crm_trace("Processing local %s operation from %s/%s intended for %s", op, pcmk__s(client_name, "client"), call_id, pcmk__s(host, "all")); } rc = cib__get_operation(op, &operation); rc = pcmk_rc2legacy(rc); if (rc != pcmk_ok) { /* TODO: construct error reply? */ crm_err("Pre-processing of command failed: %s", pcmk_strerror(rc)); return rc; } op_function = based_get_op_function(operation); if (op_function == NULL) { crm_err("Operation %s not supported by CIB manager", op); return -EOPNOTSUPP; } if (cib_client != NULL) { parse_local_options(cib_client, operation, host, op, &local_notify, &needs_reply, &process, &needs_forward); } else if (!parse_peer_options(operation, request, &local_notify, &needs_reply, &process)) { return rc; } if (pcmk_is_set(call_options, cib_transaction)) { /* All requests in a transaction are processed locally against a working * CIB copy, and we don't notify for individual requests because the * entire transaction is atomic. * * We still call the option parser functions above, for the sake of log * messages and checking whether we're the target for peer requests. */ process = TRUE; needs_reply = FALSE; local_notify = FALSE; needs_forward = FALSE; } is_update = pcmk_is_set(operation->flags, cib__op_attr_modifies); if (pcmk_is_set(call_options, cib_discard_reply)) { /* If the request will modify the CIB, and we are in legacy mode, we * need to build a reply so we can broadcast a diff, even if the * requester doesn't want one. 
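     * (Legacy mode has since been dropped, which is presumably why the
     * code below clears needs_reply rather than building one.)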
*/ needs_reply = FALSE; local_notify = FALSE; crm_trace("Client is not interested in the reply"); } if (needs_forward) { forward_request(request); return rc; } if (cib_status != pcmk_ok) { rc = cib_status; crm_err("Ignoring request because cluster configuration is invalid " "(please repair and restart): %s", pcmk_strerror(rc)); op_reply = create_cib_reply(op, call_id, client_id, call_options, rc, the_cib); } else if (process) { time_t finished = 0; time_t now = time(NULL); int level = LOG_INFO; const char *section = crm_element_value(request, PCMK__XA_CIB_SECTION); const char *admin_epoch_s = NULL; const char *epoch_s = NULL; const char *num_updates_s = NULL; rc = cib_process_command(request, operation, op_function, &op_reply, &result_diff, privileged); if (!is_update) { level = LOG_TRACE; } else if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { switch (rc) { case pcmk_ok: level = LOG_INFO; break; case -pcmk_err_old_data: case -pcmk_err_diff_resync: case -pcmk_err_diff_failed: level = LOG_TRACE; break; default: level = LOG_ERR; } } else if (rc != pcmk_ok) { level = LOG_WARNING; } if (the_cib != NULL) { admin_epoch_s = crm_element_value(the_cib, PCMK_XA_ADMIN_EPOCH); epoch_s = crm_element_value(the_cib, PCMK_XA_EPOCH); num_updates_s = crm_element_value(the_cib, PCMK_XA_NUM_UPDATES); } do_crm_log(level, "Completed %s operation for section %s: %s (rc=%d, origin=%s/%s/%s, version=%s.%s.%s)", op, section ? section : "'all'", pcmk_strerror(rc), rc, originator ? originator : "local", pcmk__s(client_name, "client"), call_id, pcmk__s(admin_epoch_s, "0"), pcmk__s(epoch_s, "0"), pcmk__s(num_updates_s, "0")); finished = time(NULL); if ((finished - now) > 3) { crm_trace("%s operation took %lds to complete", op, (long)(finished - now)); crm_write_blackbox(0, NULL); } if (op_reply == NULL && (needs_reply || local_notify)) { crm_err("Unexpected NULL reply to message"); crm_log_xml_err(request, "null reply"); needs_reply = FALSE; local_notify = FALSE; } } if (is_update) { crm_trace("Completed pre-sync update from %s/%s/%s%s", originator ? originator : "local", pcmk__s(client_name, "client"), call_id, local_notify?" with local notification":""); } else if (!needs_reply || stand_alone) { // This was a non-originating secondary update crm_trace("Completed update as secondary"); } else if ((cib_client == NULL) && !pcmk_is_set(call_options, cib_discard_reply)) { if (is_update == FALSE || result_diff == NULL) { crm_trace("Request not broadcast: R/O call"); } else if (rc != pcmk_ok) { crm_trace("Request not broadcast: call failed: %s", pcmk_strerror(rc)); } else { crm_trace("Directing reply to %s", originator); } send_peer_reply(op_reply, originator); } if (local_notify && client_id) { crm_trace("Performing local %ssync notification for %s", (pcmk_is_set(call_options, cib_sync_call)? "" : "a"), client_id); if (process == FALSE) { do_local_notify(request, client_id, pcmk_is_set(call_options, cib_sync_call), (cib_client == NULL)); } else { do_local_notify(op_reply, client_id, pcmk_is_set(call_options, cib_sync_call), (cib_client == NULL)); } } pcmk__xml_free(op_reply); pcmk__xml_free(result_diff); return rc; } /*! * \internal * \brief Get a CIB operation's input from the request XML * * \param[in] request CIB request XML * \param[in] type CIB operation type * \param[out] section Where to store CIB section name * * \return Input XML for CIB operation * * \note If not \c NULL, the return value is a non-const pointer to part of * \p request. The caller should not free it directly. 
*/ static xmlNode * prepare_input(const xmlNode *request, enum cib__op_type type, const char **section) { xmlNode *wrapper = pcmk__xe_first_child(request, PCMK__XE_CIB_CALLDATA, NULL, NULL); xmlNode *input = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); if (type == cib__op_apply_patch) { *section = NULL; } else { *section = crm_element_value(request, PCMK__XA_CIB_SECTION); } // Grab the specified section if ((*section != NULL) && pcmk__xe_is(input, PCMK_XE_CIB)) { input = pcmk_find_cib_element(input, *section); } return input; } #define XPATH_CONFIG_CHANGE \ "//" PCMK_XE_CHANGE \ "[contains(@" PCMK_XA_PATH ",'/" PCMK_XE_CRM_CONFIG "/')]" static bool contains_config_change(xmlNode *diff) { bool changed = false; if (diff) { xmlXPathObject *xpathObj = pcmk__xpath_search(diff->doc, XPATH_CONFIG_CHANGE); if (pcmk__xpath_num_results(xpathObj) > 0) { changed = true; } xmlXPathFreeObject(xpathObj); } return changed; } static int cib_process_command(xmlNode *request, const cib__operation_t *operation, cib__op_fn_t op_function, xmlNode **reply, xmlNode **cib_diff, bool privileged) { xmlNode *input = NULL; xmlNode *output = NULL; xmlNode *result_cib = NULL; uint32_t call_options = cib_none; const char *op = NULL; const char *section = NULL; const char *call_id = crm_element_value(request, PCMK__XA_CIB_CALLID); const char *client_id = crm_element_value(request, PCMK__XA_CIB_CLIENTID); const char *client_name = crm_element_value(request, PCMK__XA_CIB_CLIENTNAME); const char *originator = crm_element_value(request, PCMK__XA_SRC); int rc = pcmk_ok; bool config_changed = false; bool manage_counters = true; static mainloop_timer_t *digest_timer = NULL; pcmk__assert(cib_status == pcmk_ok); if(digest_timer == NULL) { digest_timer = mainloop_timer_add("digester", 5000, FALSE, cib_digester_cb, NULL); } *reply = NULL; *cib_diff = NULL; /* Start processing the request... */ op = crm_element_value(request, PCMK__XA_CIB_OP); rc = pcmk__xe_get_flags(request, PCMK__XA_CIB_CALLOPT, &call_options, cib_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } if (!privileged && pcmk_is_set(operation->flags, cib__op_attr_privileged)) { rc = -EACCES; crm_trace("Failed due to lack of privileges: %s", pcmk_strerror(rc)); goto done; } input = prepare_input(request, operation->type, §ion); if (!pcmk_is_set(operation->flags, cib__op_attr_modifies)) { rc = cib_perform_op(NULL, op, call_options, op_function, true, section, request, input, false, &config_changed, &the_cib, &result_cib, NULL, &output); CRM_CHECK(result_cib == NULL, pcmk__xml_free(result_cib)); goto done; } /* @COMPAT: Handle a valid write action (legacy) * * @TODO: Re-evaluate whether this is all truly legacy. The cib_force_diff * portion is. However, PCMK__XA_CIB_UPDATE may be set by a sync operation * even in non-legacy mode, and manage_counters tells xml_create_patchset() * whether to update version/epoch info. */ if (pcmk__xe_attr_is_true(request, PCMK__XA_CIB_UPDATE)) { manage_counters = false; cib__set_call_options(call_options, "call", cib_force_diff); crm_trace("Global update detected"); CRM_LOG_ASSERT(pcmk__str_any_of(op, PCMK__CIB_REQUEST_APPLY_PATCH, PCMK__CIB_REQUEST_REPLACE, NULL)); } ping_modified_since = TRUE; // result_cib must not be modified after cib_perform_op() returns rc = cib_perform_op(NULL, op, call_options, op_function, false, section, request, input, manage_counters, &config_changed, &the_cib, &result_cib, cib_diff, &output); /* Always write to disk for successful ops with the flag set. 
This also * negates the need to detect ordering changes. */ if ((rc == pcmk_ok) && pcmk_is_set(operation->flags, cib__op_attr_writes_through)) { config_changed = true; } if ((rc == pcmk_ok) && !pcmk_any_flags_set(call_options, cib_dryrun|cib_transaction)) { if (result_cib != the_cib) { if (pcmk_is_set(operation->flags, cib__op_attr_writes_through)) { config_changed = true; } crm_trace("Activating %s->%s%s", crm_element_value(the_cib, PCMK_XA_NUM_UPDATES), crm_element_value(result_cib, PCMK_XA_NUM_UPDATES), (config_changed? " changed" : "")); rc = activateCibXml(result_cib, config_changed, op); if (rc != pcmk_ok) { crm_err("Failed to activate new CIB: %s", pcmk_strerror(rc)); } } if ((rc == pcmk_ok) && contains_config_change(*cib_diff)) { cib_read_config(config_hash, result_cib); } /* @COMPAT Nodes older than feature set 3.19.0 don't support * transactions. In a mixed-version cluster with nodes <3.19.0, we must * sync the updated CIB, so that the older nodes receive the changes. * Any node that has already applied the transaction will ignore the * synced CIB. * * To ensure the updated CIB is synced from only one node, we sync it * from the originator. */ if ((operation->type == cib__op_commit_transact) && pcmk__str_eq(originator, OUR_NODENAME, pcmk__str_casei) && compare_version(crm_element_value(the_cib, PCMK_XA_CRM_FEATURE_SET), "3.19.0") < 0) { sync_our_cib(request, TRUE); } mainloop_timer_stop(digest_timer); mainloop_timer_start(digest_timer); } else if (rc == -pcmk_err_schema_validation) { pcmk__assert(result_cib != the_cib); if (output != NULL) { crm_log_xml_info(output, "cib:output"); pcmk__xml_free(output); } output = result_cib; } else { crm_trace("Not activating %d %d %s", rc, pcmk_is_set(call_options, cib_dryrun), crm_element_value(result_cib, PCMK_XA_NUM_UPDATES)); if (result_cib != the_cib) { pcmk__xml_free(result_cib); } } if (!pcmk_any_flags_set(call_options, cib_dryrun|cib_inhibit_notify|cib_transaction)) { crm_trace("Sending notifications %d", pcmk_is_set(call_options, cib_dryrun)); cib_diff_notify(op, rc, call_id, client_id, client_name, originator, input, *cib_diff); } pcmk__log_xml_patchset(LOG_TRACE, *cib_diff); done: if (!pcmk_is_set(call_options, cib_discard_reply)) { *reply = create_cib_reply(op, call_id, client_id, call_options, rc, output); } if (output != the_cib) { pcmk__xml_free(output); } crm_trace("done"); return rc; } void cib_peer_callback(xmlNode * msg, void *private_data) { const char *reason = NULL; const char *originator = crm_element_value(msg, PCMK__XA_SRC); if (pcmk__peer_cache == NULL) { reason = "membership not established"; goto bail; } if (crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME) == NULL) { crm_xml_add(msg, PCMK__XA_CIB_CLIENTNAME, originator); } /* crm_log_xml_trace(msg, "Peer[inbound]"); */ cib_process_request(msg, TRUE, NULL); return; bail: if (reason) { const char *op = crm_element_value(msg, PCMK__XA_CIB_OP); crm_warn("Discarding %s message from %s: %s", op, originator, reason); } } static gboolean cib_force_exit(gpointer data) { crm_notice("Exiting immediately after %s without shutdown acknowledgment", pcmk__readable_interval(EXIT_ESCALATION_MS)); terminate_cib(CRM_EX_ERROR); return FALSE; } static void disconnect_remote_client(gpointer key, gpointer value, gpointer user_data) { pcmk__client_t *a_client = value; crm_err("Can't disconnect client %s: Not implemented", pcmk__client_name(a_client)); } static void initiate_exit(void) { int active = 0; xmlNode *leaving = NULL; active = pcmk__cluster_num_active_nodes(); if (active < 2) { // 
This is the last active node crm_info("Exiting without sending shutdown request (no active peers)"); terminate_cib(CRM_EX_OK); return; } crm_info("Sending shutdown request to %d peers", active); leaving = pcmk__xe_create(NULL, PCMK__XE_EXIT_NOTIFICATION); crm_xml_add(leaving, PCMK__XA_T, PCMK__VALUE_CIB); crm_xml_add(leaving, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_SHUTDOWN); pcmk__cluster_send_message(NULL, pcmk_ipc_based, leaving); pcmk__xml_free(leaving); pcmk__create_timer(EXIT_ESCALATION_MS, cib_force_exit, NULL); } void cib_shutdown(int nsig) { struct qb_ipcs_stats srv_stats; if (cib_shutdown_flag == FALSE) { int disconnects = 0; qb_ipcs_connection_t *c = NULL; cib_shutdown_flag = TRUE; c = qb_ipcs_connection_first_get(ipcs_rw); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_rw, last); crm_debug("Disconnecting r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_ro); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_ro, last); crm_debug("Disconnecting r/o client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } c = qb_ipcs_connection_first_get(ipcs_shm); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(ipcs_shm, last); crm_debug("Disconnecting non-blocking r/w client %p...", last); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); disconnects++; } disconnects += pcmk__ipc_client_count(); crm_debug("Disconnecting %d remote clients", pcmk__ipc_client_count()); pcmk__foreach_ipc_client(disconnect_remote_client, NULL); crm_info("Disconnected %d clients", disconnects); } qb_ipcs_stats_get(ipcs_rw, &srv_stats, QB_FALSE); if (pcmk__ipc_client_count() == 0) { crm_info("All clients disconnected (%d)", srv_stats.active_connections); initiate_exit(); } else { crm_info("Waiting on %d clients to disconnect (%d)", pcmk__ipc_client_count(), srv_stats.active_connections); } } extern int remote_fd; extern int remote_tls_fd; /*! * \internal * \brief Close remote sockets, free the global CIB and quit * * \param[in] exit_status What exit status to use (if -1, use CRM_EX_OK, but * skip disconnecting from the cluster layer) */ void terminate_cib(int exit_status) { if (remote_fd > 0) { close(remote_fd); remote_fd = 0; } if (remote_tls_fd > 0) { close(remote_tls_fd); remote_tls_fd = 0; } uninitializeCib(); // Exit immediately on error if (exit_status > CRM_EX_OK) { pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(exit_status); return; } if ((mainloop != NULL) && g_main_loop_is_running(mainloop)) { /* Quit via returning from the main loop. If exit_status has the special * value -1, we skip the disconnect here, and it will be done when the * main loop returns (this allows the peer status callback to avoid * messing with the peer caches). */ if (exit_status == CRM_EX_OK) { pcmk_cluster_disconnect(crm_cluster); } g_main_loop_quit(mainloop); return; } /* Exit cleanly. Even the peer status callback can disconnect here, because * we're not returning control to the caller. 
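     * (We only reach this point when no main loop is running, i.e. with an
     * exit_status of CRM_EX_OK or the special -1 value described above.)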
*/ pcmk_cluster_disconnect(crm_cluster); pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm); crm_exit(CRM_EX_OK); } diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c index e2bf3d1dc1..220f830566 100644 --- a/daemons/controld/controld_control.c +++ b/daemons/controld/controld_control.c @@ -1,725 +1,725 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include static qb_ipcs_service_t *ipcs = NULL; static crm_trigger_t *config_read_trigger = NULL; #if SUPPORT_COROSYNC extern gboolean crm_connect_corosync(pcmk_cluster_t *cluster); #endif static void crm_shutdown(int nsig); static gboolean crm_read_options(gpointer user_data); /* A_HA_CONNECT */ void do_ha_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { gboolean registered = FALSE; if (controld_globals.cluster == NULL) { controld_globals.cluster = pcmk_cluster_new(); } if (action & A_HA_DISCONNECT) { pcmk_cluster_disconnect(controld_globals.cluster); crm_info("Disconnected from the cluster"); controld_set_fsa_input_flags(R_HA_DISCONNECTED); } if (action & A_HA_CONNECT) { pcmk__cluster_set_status_callback(&peer_update_callback); pcmk__cluster_set_autoreap(false); #if SUPPORT_COROSYNC if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { registered = crm_connect_corosync(controld_globals.cluster); } #endif // SUPPORT_COROSYNC if (registered) { pcmk__node_status_t *node = controld_get_local_node_status(); controld_election_init(); free(controld_globals.our_uuid); controld_globals.our_uuid = pcmk__str_copy(pcmk__cluster_get_xml_id(node)); if (controld_globals.our_uuid == NULL) { crm_err("Could not obtain local uuid"); registered = FALSE; } } if (!registered) { controld_set_fsa_input_flags(R_HA_DISCONNECTED); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); return; } populate_cib_nodes(controld_node_update_none, __func__); controld_clear_fsa_input_flags(R_HA_DISCONNECTED); crm_info("Connected to the cluster"); } if (action & ~(A_HA_CONNECT | A_HA_DISCONNECT)) { crm_err("Unexpected action %s in %s", fsa_action2string(action), __func__); } } /* A_SHUTDOWN */ void do_shutdown(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { /* just in case */ controld_set_fsa_input_flags(R_SHUTDOWN); controld_disconnect_fencer(FALSE); } /* A_SHUTDOWN_REQ */ void do_shutdown_req(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { xmlNode *msg = NULL; controld_set_fsa_input_flags(R_SHUTDOWN); //controld_set_fsa_input_flags(R_STAYDOWN); crm_info("Sending shutdown request to all peers (DC is %s)", pcmk__s(controld_globals.dc_name, "not set")); msg = pcmk__new_request(pcmk_ipc_controld, CRM_SYSTEM_CRMD, NULL, CRM_SYSTEM_CRMD, CRM_OP_SHUTDOWN_REQ, NULL); if (!pcmk__cluster_send_message(NULL, pcmk_ipc_controld, msg)) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } pcmk__xml_free(msg); } void crmd_fast_exit(crm_exit_t exit_code) { if (pcmk_is_set(controld_globals.fsa_input_register, R_STAYDOWN)) { crm_warn("Inhibiting respawn " QB_XS " remapping exit code %d to %d", 
exit_code, CRM_EX_FATAL); exit_code = CRM_EX_FATAL; } else if ((exit_code == CRM_EX_OK) && pcmk_is_set(controld_globals.fsa_input_register, R_IN_RECOVERY)) { crm_err("Could not recover from internal error"); exit_code = CRM_EX_ERROR; } if (controld_globals.logger_out != NULL) { controld_globals.logger_out->finish(controld_globals.logger_out, exit_code, true, NULL); pcmk__output_free(controld_globals.logger_out); controld_globals.logger_out = NULL; } crm_exit(exit_code); } crm_exit_t crmd_exit(crm_exit_t exit_code) { GMainLoop *mloop = controld_globals.mainloop; static bool in_progress = FALSE; if (in_progress && (exit_code == CRM_EX_OK)) { crm_debug("Exit is already in progress"); return exit_code; } else if(in_progress) { crm_notice("Error during shutdown process, exiting now with status %d (%s)", exit_code, crm_exit_str(exit_code)); crm_write_blackbox(SIGTRAP, NULL); crmd_fast_exit(exit_code); } in_progress = TRUE; crm_trace("Preparing to exit with status %d (%s)", exit_code, crm_exit_str(exit_code)); /* Suppress secondary errors resulting from us disconnecting everything */ controld_set_fsa_input_flags(R_HA_DISCONNECTED); /* Close all IPC servers and clients to ensure any and all shared memory files are cleaned up */ if(ipcs) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; } controld_close_attrd_ipc(); controld_shutdown_schedulerd_ipc(); controld_disconnect_fencer(TRUE); if ((exit_code == CRM_EX_OK) && (controld_globals.mainloop == NULL)) { crm_debug("No mainloop detected"); exit_code = CRM_EX_ERROR; } /* On an error, just get out. * * Otherwise, make the effort to have mainloop exit gracefully so * that it (mostly) cleans up after itself and valgrind has less * to report on - allowing real errors stand out */ if (exit_code != CRM_EX_OK) { crm_notice("Forcing immediate exit with status %d (%s)", exit_code, crm_exit_str(exit_code)); crm_write_blackbox(SIGTRAP, NULL); crmd_fast_exit(exit_code); } /* Clean up as much memory as possible for valgrind */ for (GList *iter = controld_globals.fsa_message_queue; iter != NULL; iter = iter->next) { fsa_data_t *fsa_data = (fsa_data_t *) iter->data; crm_info("Dropping %s: [ state=%s cause=%s origin=%s ]", fsa_input2string(fsa_data->fsa_input), fsa_state2string(controld_globals.fsa_state), fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin); delete_fsa_input(fsa_data); } controld_clear_fsa_input_flags(R_MEMBERSHIP); g_list_free(controld_globals.fsa_message_queue); controld_globals.fsa_message_queue = NULL; controld_free_node_pending_timers(); election_reset(controld_globals.cluster); // Stop any election timer /* Tear down the CIB manager connection, but don't free it yet -- it could * be used when we drain the mainloop later. 
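 * (The connection object is freed further below via cib_delete(), only
 * after the bounded g_main_context_dispatch() drain loop has run.)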
*/ controld_disconnect_cib_manager(); verify_stopped(controld_globals.fsa_state, LOG_WARNING); controld_clear_fsa_input_flags(R_LRM_CONNECTED); lrm_state_destroy_all(); mainloop_destroy_trigger(config_read_trigger); config_read_trigger = NULL; controld_destroy_fsa_trigger(); controld_destroy_transition_trigger(); pcmk__client_cleanup(); pcmk__cluster_destroy_node_caches(); controld_free_fsa_timers(); te_cleanup_stonith_history_sync(NULL, TRUE); controld_free_sched_timer(); free(controld_globals.our_uuid); controld_globals.our_uuid = NULL; free(controld_globals.dc_name); controld_globals.dc_name = NULL; free(controld_globals.dc_version); controld_globals.dc_version = NULL; free(controld_globals.cluster_name); controld_globals.cluster_name = NULL; free(controld_globals.te_uuid); controld_globals.te_uuid = NULL; free_max_generation(); controld_destroy_failed_sync_table(); controld_destroy_outside_events_table(); mainloop_destroy_signal(SIGPIPE); mainloop_destroy_signal(SIGUSR1); mainloop_destroy_signal(SIGTERM); mainloop_destroy_signal(SIGTRAP); /* leave SIGCHLD engaged as we might still want to drain some service-actions */ if (mloop) { GMainContext *ctx = g_main_loop_get_context(controld_globals.mainloop); /* Don't re-enter this block */ controld_globals.mainloop = NULL; /* no signals on final draining anymore */ mainloop_destroy_signal(SIGCHLD); crm_trace("Draining mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx)); { int lpc = 0; while((g_main_context_pending(ctx) && lpc < 10)) { lpc++; crm_trace("Iteration %d", lpc); g_main_context_dispatch(ctx); } } crm_trace("Closing mainloop %d %d", g_main_loop_is_running(mloop), g_main_context_pending(ctx)); g_main_loop_quit(mloop); /* Won't do anything yet, since we're inside it now */ g_main_loop_unref(mloop); } else { mainloop_destroy_signal(SIGCHLD); } cib_delete(controld_globals.cib_conn); controld_globals.cib_conn = NULL; throttle_fini(); pcmk_cluster_free(controld_globals.cluster); controld_globals.cluster = NULL; /* Graceful */ crm_trace("Done preparing for exit with status %d (%s)", exit_code, crm_exit_str(exit_code)); return exit_code; } /* A_EXIT_0, A_EXIT_1 */ void do_exit(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_exit_t exit_code = CRM_EX_OK; if (pcmk_is_set(action, A_EXIT_1)) { exit_code = CRM_EX_ERROR; crm_err("Exiting now due to errors"); } verify_stopped(cur_state, LOG_ERR); crmd_exit(exit_code); } static void sigpipe_ignore(int nsig) { return; } /* A_STARTUP */ void do_startup(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_debug("Registering Signal Handlers"); mainloop_add_signal(SIGTERM, crm_shutdown); mainloop_add_signal(SIGPIPE, sigpipe_ignore); config_read_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, crm_read_options, NULL); controld_init_fsa_trigger(); controld_init_transition_trigger(); crm_debug("Creating CIB manager and executor objects"); controld_globals.cib_conn = cib_new(); lrm_state_init_local(); if (controld_init_fsa_timers() == FALSE) { register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } // \return libqb error code (0 on success, -errno on error) static int32_t accept_controller_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("Accepting new IPC client connection"); if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } // \return libqb error code (0 on success, 
// -errno on error) static int32_t dispatch_controller_ipc(qb_ipcs_connection_t * c, void *data, size_t size) { int rc = pcmk_rc_ok; uint32_t id = 0; uint32_t flags = 0; pcmk__client_t *client = pcmk__find_client(c); xmlNode *msg = NULL; rc = pcmk__ipc_msg_append(&client->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. */ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. */ - msg = pcmk__client_data2xml(client, client->buffer->data, &id, &flags); + msg = pcmk__client_data2xml(client, &id, &flags); g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return. */ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (client->buffer != NULL) { g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } return 0; } if (msg == NULL) { pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } pcmk__ipc_send_ack(client, id, flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); pcmk__assert(client->user != NULL); pcmk__update_acl_user(msg, PCMK__XA_CRM_USER, client->user); crm_xml_add(msg, PCMK__XA_CRM_SYS_FROM, client->id); if (controld_authorize_ipc_message(msg, client, NULL)) { crm_trace("Processing IPC message from client %s", pcmk__client_name(client)); route_message(C_IPC_MESSAGE, msg); } controld_trigger_fsa(); pcmk__xml_free(msg); return 0; } static int32_t ipc_client_disconnected(qb_ipcs_connection_t *c) { pcmk__client_t *client = pcmk__find_client(c); if (client) { crm_trace("Disconnecting %sregistered client %s (%p/%p)", (client->userdata? "" : "un"), pcmk__client_name(client), c, client); free(client->userdata); pcmk__free_client(client); controld_trigger_fsa(); } return 0; } static void ipc_connection_destroyed(qb_ipcs_connection_t *c) { crm_trace("Connection %p", c); ipc_client_disconnected(c); } /* A_STOP */ void do_stop(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_trace("Closing IPC server"); mainloop_del_ipc_server(ipcs); ipcs = NULL; register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL); } /* A_STARTED */ void do_started(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { static struct qb_ipcs_service_handlers crmd_callbacks = { .connection_accept = accept_controller_client, .connection_created = NULL, .msg_process = dispatch_controller_ipc, .connection_closed = ipc_client_disconnected, .connection_destroyed = ipc_connection_destroyed }; if (cur_state != S_STARTING) { crm_err("Start cancelled...
%s", fsa_state2string(cur_state)); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_MEMBERSHIP)) { crm_info("Delaying start, no membership data (%.16llx)", R_MEMBERSHIP); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_LRM_CONNECTED)) { crm_info("Delaying start, not connected to executor (%.16llx)", R_LRM_CONNECTED); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) { crm_info("Delaying start, CIB not connected (%.16llx)", R_CIB_CONNECTED); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_READ_CONFIG)) { crm_info("Delaying start, Config not read (%.16llx)", R_READ_CONFIG); crmd_fsa_stall(TRUE); return; } else if (!pcmk_is_set(controld_globals.fsa_input_register, R_PEER_DATA)) { crm_info("Delaying start, No peer data (%.16llx)", R_PEER_DATA); crmd_fsa_stall(TRUE); return; } crm_debug("Init server comms"); ipcs = pcmk__serve_controld_ipc(&crmd_callbacks); if (ipcs == NULL) { crm_err("Failed to create IPC server: shutting down and inhibiting respawn"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } else { crm_notice("Pacemaker controller successfully started and accepting connections"); } controld_set_fsa_input_flags(R_ST_REQUIRED); controld_timer_fencer_connect(GINT_TO_POINTER(TRUE)); controld_clear_fsa_input_flags(R_STARTING); register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL); } /* A_RECOVER */ void do_recover(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { controld_set_fsa_input_flags(R_IN_RECOVERY); crm_warn("Fast-tracking shutdown in response to errors"); register_fsa_input(C_FSA_INTERNAL, I_TERMINATE, NULL); } static void config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { const char *value = NULL; GHashTable *config_hash = NULL; crm_time_t *now = crm_time_new(NULL); xmlNode *crmconfig = NULL; xmlNode *alerts = NULL; pcmk_rule_input_t rule_input = { .now = now, }; if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; crm_err("Local CIB query resulted in an error: %s", pcmk_strerror(rc)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); if (rc == -EACCES || rc == -pcmk_err_schema_validation) { crm_err("The cluster is mis-configured - shutting down and staying down"); controld_set_fsa_input_flags(R_STAYDOWN); } goto bail; } crmconfig = output; if ((crmconfig != NULL) && !pcmk__xe_is(crmconfig, PCMK_XE_CRM_CONFIG)) { crmconfig = pcmk__xe_first_child(crmconfig, PCMK_XE_CRM_CONFIG, NULL, NULL); } if (!crmconfig) { fsa_data_t *msg_data = NULL; crm_err("Local CIB query for " PCMK_XE_CRM_CONFIG " section failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); goto bail; } crm_debug("Call %d : Parsing CIB options", call_id); config_hash = pcmk__strkey_table(free, free); pcmk_unpack_nvpair_blocks(crmconfig, PCMK_XE_CLUSTER_PROPERTY_SET, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, &rule_input, config_hash, NULL); // Validate all options, and use defaults if not already present in hash pcmk__validate_cluster_options(config_hash); /* Validate the watchdog timeout in the context of the local node * environment. If invalid, the controller will exit with a fatal error. * * We do this via a wrapper in the controller, so that we call * pcmk__valid_stonith_watchdog_timeout() only if watchdog fencing is * enabled for the local node. Otherwise, we may exit unnecessarily. 
* * A validator function in libcrmcommon can't act as such a wrapper, because * it doesn't have a stonith API connection or the local node name. */ value = g_hash_table_lookup(config_hash, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT); controld_verify_stonith_watchdog_timeout(value); value = g_hash_table_lookup(config_hash, PCMK_OPT_NO_QUORUM_POLICY); if (pcmk__strcase_any_of(value, PCMK_VALUE_FENCE, PCMK_VALUE_FENCE_LEGACY, NULL) && (pcmk__locate_sbd() != 0)) { controld_set_global_flags(controld_no_quorum_panic); } value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK); if (crm_is_true(value)) { controld_set_global_flags(controld_shutdown_lock_enabled); } else { controld_clear_global_flags(controld_shutdown_lock_enabled); } value = g_hash_table_lookup(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT); pcmk_parse_interval_spec(value, &controld_globals.shutdown_lock_limit); controld_globals.shutdown_lock_limit /= 1000; value = g_hash_table_lookup(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT); pcmk_parse_interval_spec(value, &controld_globals.node_pending_timeout); controld_globals.node_pending_timeout /= 1000; value = g_hash_table_lookup(config_hash, PCMK_OPT_CLUSTER_NAME); pcmk__str_update(&(controld_globals.cluster_name), value); // Let subcomponents initialize their own static variables controld_configure_election(config_hash); controld_configure_fencing(config_hash); controld_configure_fsa_timers(config_hash); controld_configure_throttle(config_hash); alerts = pcmk__xe_first_child(output, PCMK_XE_ALERTS, NULL, NULL); crmd_unpack_alerts(alerts); controld_set_fsa_input_flags(R_READ_CONFIG); controld_trigger_fsa(); g_hash_table_destroy(config_hash); bail: crm_time_free(now); } /*! * \internal * \brief Trigger read and processing of the configuration * * \param[in] fn Calling function name * \param[in] line Line number where call occurred */ void controld_trigger_config_as(const char *fn, int line) { if (config_read_trigger != NULL) { crm_trace("%s:%d - Triggered config processing", fn, line); mainloop_set_trigger(config_read_trigger); } } gboolean crm_read_options(gpointer user_data) { cib_t *cib_conn = controld_globals.cib_conn; int call_id = cib_conn->cmds->query(cib_conn, "//" PCMK_XE_CRM_CONFIG " | //" PCMK_XE_ALERTS, NULL, cib_xpath); fsa_register_cib_callback(call_id, NULL, config_query_callback); crm_trace("Querying the CIB... call %d", call_id); return TRUE; } /* A_READCONFIG */ void do_read_config(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { throttle_init(); controld_trigger_config(); } static void crm_shutdown(int nsig) { const char *value = NULL; guint default_period_ms = 0; if ((controld_globals.mainloop == NULL) || !g_main_loop_is_running(controld_globals.mainloop)) { crmd_exit(CRM_EX_OK); return; } if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { crm_err("Escalating shutdown"); register_fsa_input_before(C_SHUTDOWN, I_ERROR, NULL); return; } controld_set_fsa_input_flags(R_SHUTDOWN); register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); /* If shutdown timer doesn't have a period set, use the default * * @TODO: Evaluate whether this is still necessary. As long as * config_query_callback() has been run at least once, it doesn't look like * anything could have changed the timer period since then. 
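 * (Until that is evaluated, the escalation period is simply re-read from
 * the cluster options and re-parsed below whenever a shutdown begins.)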
*/ value = pcmk__cluster_option(NULL, PCMK_OPT_SHUTDOWN_ESCALATION); pcmk_parse_interval_spec(value, &default_period_ms); controld_shutdown_start_countdown(default_period_ms); } diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c index b51890ce01..4816496074 100644 --- a/daemons/execd/pacemaker-execd.c +++ b/daemons/execd/pacemaker-execd.c @@ -1,606 +1,606 @@ /* * Copyright 2012-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include // stonith__api_new() #include #include "pacemaker-execd.h" #ifdef PCMK__COMPILE_REMOTE # define EXECD_TYPE "remote" # define EXECD_NAME PCMK__SERVER_REMOTED # define SUMMARY "resource agent executor daemon for Pacemaker Remote nodes" #else # define EXECD_TYPE "local" # define EXECD_NAME PCMK__SERVER_EXECD # define SUMMARY "resource agent executor daemon for Pacemaker cluster nodes" #endif static GMainLoop *mainloop = NULL; static qb_ipcs_service_t *ipcs = NULL; static stonith_t *stonith_api = NULL; int lrmd_call_id = 0; time_t start_time; static struct { gchar **log_files; #ifdef PCMK__COMPILE_REMOTE gchar *port; #endif // PCMK__COMPILE_REMOTE } options; #ifdef PCMK__COMPILE_REMOTE /* whether shutdown request has been sent */ static gboolean shutting_down = FALSE; #endif static void exit_executor(void); static void stonith_connection_destroy_cb(stonith_t * st, stonith_event_t * e) { stonith_api->state = stonith_disconnected; stonith_connection_failed(); } stonith_t * get_stonith_connection(void) { if (stonith_api && stonith_api->state == stonith_disconnected) { stonith__api_free(stonith_api); stonith_api = NULL; } if (stonith_api == NULL) { int rc = pcmk_ok; stonith_api = stonith__api_new(); if (stonith_api == NULL) { crm_err("Could not connect to fencer: API memory allocation failed"); return NULL; } rc = stonith__api_connect_retry(stonith_api, crm_system_name, 10); if (rc != pcmk_rc_ok) { crm_err("Could not connect to fencer in 10 attempts: %s " QB_XS " rc=%d", pcmk_rc_str(rc), rc); stonith__api_free(stonith_api); stonith_api = NULL; } else { stonith_api_operations_t *cmds = stonith_api->cmds; cmds->register_notification(stonith_api, PCMK__VALUE_ST_NOTIFY_DISCONNECT, stonith_connection_destroy_cb); } } return stonith_api; } static int32_t lrmd_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } static void lrmd_ipc_created(qb_ipcs_connection_t * c) { pcmk__client_t *new_client = pcmk__find_client(c); crm_trace("Connection %p", c); pcmk__assert(new_client != NULL); /* Now that the connection is offically established, alert * the other clients a new connection exists. */ notify_of_new_client(new_client); } static int32_t lrmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) { int rc = pcmk_rc_ok; uint32_t id = 0; uint32_t flags = 0; pcmk__client_t *client = pcmk__find_client(c); xmlNode *request = NULL; CRM_CHECK(client != NULL, crm_err("Invalid client"); return FALSE); CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p", client); return FALSE); rc = pcmk__ipc_msg_append(&client->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. 
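 * pcmk__ipc_msg_append() keeps accumulating IPC chunks in client->buffer,
 * returning pcmk_rc_ipc_more until a complete message is available; once
 * it returns pcmk_rc_ok, the reassembled message is consumed by
 * pcmk__client_data2xml(), which (with this change) takes its data from
 * the client's buffer rather than a separate data pointer argument.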
*/ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. */ - request = pcmk__client_data2xml(client, client->buffer->data, &id, &flags); + request = pcmk__client_data2xml(client, &id, &flags); g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return. */ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (client->buffer != NULL) { g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } return 0; } CRM_CHECK(flags & crm_ipc_client_response, crm_err("Invalid client request: %p", client); return FALSE); if (!request) { return 0; } /* @TODO functionize some of this to reduce duplication with * lrmd_remote_client_msg() */ if (!client->name) { const char *value = crm_element_value(request, PCMK__XA_LRMD_CLIENTNAME); if (value == NULL) { client->name = pcmk__itoa(pcmk__client_pid(c)); } else { client->name = pcmk__str_copy(value); } } lrmd_call_id++; if (lrmd_call_id < 1) { lrmd_call_id = 1; } crm_xml_add(request, PCMK__XA_LRMD_CLIENTID, client->id); crm_xml_add(request, PCMK__XA_LRMD_CLIENTNAME, client->name); crm_xml_add_int(request, PCMK__XA_LRMD_CALLID, lrmd_call_id); process_lrmd_message(client, id, request); pcmk__xml_free(request); return 0; } /*! * \internal * \brief Free a client connection, and exit if appropriate * * \param[in,out] client Client connection to free */ void lrmd_client_destroy(pcmk__client_t *client) { pcmk__free_client(client); #ifdef PCMK__COMPILE_REMOTE /* If we were waiting to shut down, we can now safely do so * if there are no more proxied IPC providers */ if (shutting_down && (ipc_proxy_get_provider() == NULL)) { exit_executor(); } #endif } static int32_t lrmd_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); client_disconnect_cleanup(client->id); #ifdef PCMK__COMPILE_REMOTE ipc_proxy_remove_provider(client); #endif lrmd_client_destroy(client); return 0; } static void lrmd_ipc_destroy(qb_ipcs_connection_t * c) { lrmd_ipc_closed(c); crm_trace("Connection %p", c); } static struct qb_ipcs_service_handlers lrmd_ipc_callbacks = { .connection_accept = lrmd_ipc_accept, .connection_created = lrmd_ipc_created, .msg_process = lrmd_ipc_dispatch, .connection_closed = lrmd_ipc_closed, .connection_destroyed = lrmd_ipc_destroy }; // \return Standard Pacemaker return code int lrmd_server_send_reply(pcmk__client_t *client, uint32_t id, xmlNode *reply) { crm_trace("Sending reply (%d) to client (%s)", id, client->id); switch (PCMK__CLIENT_TYPE(client)) { case pcmk__client_ipc: return pcmk__ipc_send_xml(client, id, reply, FALSE); #ifdef PCMK__COMPILE_REMOTE case pcmk__client_tls: return lrmd__remote_send_xml(client->remote, reply, id, "reply"); #endif default: crm_err("Could not send reply: unknown type for client %s " QB_XS " flags=%#llx", pcmk__client_name(client), client->flags); } return ENOTCONN; } // \return Standard Pacemaker return code int lrmd_server_send_notify(pcmk__client_t *client, xmlNode *msg) { crm_trace("Sending notification to client (%s)", client->id); switch (PCMK__CLIENT_TYPE(client)) { case pcmk__client_ipc: if (client->ipcs == NULL) { crm_trace("Could not notify local client: disconnected"); return ENOTCONN; } return pcmk__ipc_send_xml(client, 0, msg, crm_ipc_server_event); #ifdef PCMK__COMPILE_REMOTE case 
pcmk__client_tls: if (client->remote == NULL) { crm_trace("Could not notify remote client: disconnected"); return ENOTCONN; } else { return lrmd__remote_send_xml(client->remote, msg, 0, "notify"); } #endif default: crm_err("Could not notify client %s with unknown transport " QB_XS " flags=%#llx", pcmk__client_name(client), client->flags); } return ENOTCONN; } /*! * \internal * \brief Clean up and exit immediately */ static void exit_executor(void) { const guint nclients = pcmk__ipc_client_count(); crm_info("Terminating with %d client%s", nclients, pcmk__plural_s(nclients)); stonith__api_free(stonith_api); if (ipcs) { mainloop_del_ipc_server(ipcs); } #ifdef PCMK__COMPILE_REMOTE execd_stop_tls_server(); ipc_proxy_cleanup(); #endif pcmk__client_cleanup(); if (mainloop) { lrmd_drain_alerts(mainloop); } g_hash_table_destroy(rsc_list); // @TODO End mainloop instead so all cleanup is done crm_exit(CRM_EX_OK); } /*! * \internal * \brief Request cluster shutdown if appropriate, otherwise exit immediately * * \param[in] nsig Signal that caused invocation (ignored) */ static void lrmd_shutdown(int nsig) { #ifdef PCMK__COMPILE_REMOTE pcmk__client_t *ipc_proxy = ipc_proxy_get_provider(); /* If there are active proxied IPC providers, then we may be running * resources, so notify the cluster that we wish to shut down. */ if (ipc_proxy) { if (shutting_down) { crm_notice("Waiting for cluster to stop resources before exiting"); return; } crm_info("Sending shutdown request to cluster"); if (ipc_proxy_shutdown_req(ipc_proxy) < 0) { crm_crit("Shutdown request failed, exiting immediately"); } else { /* We requested a shutdown. Now, we need to wait for an * acknowledgement from the proxy host, then wait for all proxy * hosts to disconnect (which ensures that all resources have been * stopped). */ shutting_down = TRUE; /* Stop accepting new proxy connections */ execd_stop_tls_server(); /* Currently, we let the OS kill us if the clients don't disconnect * in a reasonable time. We could instead set a long timer here * (shorter than what the OS is likely to use) and exit immediately * if it pops. */ return; } } #endif exit_executor(); } /*! * \internal * \brief Log a shutdown acknowledgment */ void handle_shutdown_ack(void) { #ifdef PCMK__COMPILE_REMOTE if (shutting_down) { crm_info("IPC proxy provider acknowledged shutdown request"); return; } #endif crm_debug("Ignoring unexpected shutdown acknowledgment " "from IPC proxy provider"); } /*! 
* \internal * \brief Handle rejection of shutdown request */ void handle_shutdown_nack(void) { #ifdef PCMK__COMPILE_REMOTE if (shutting_down) { crm_info("Exiting immediately after IPC proxy provider " "indicated no resources will be stopped"); exit_executor(); return; } #endif crm_debug("Ignoring unexpected shutdown rejection from IPC proxy provider"); } static GOptionEntry entries[] = { { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY, &options.log_files, "Send logs to the additional named logfile", NULL }, #ifdef PCMK__COMPILE_REMOTE { "port", 'p', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &options.port, "Port to listen on (defaults to " G_STRINGIFY(DEFAULT_REMOTE_PORT) ")", NULL }, #endif // PCMK__COMPILE_REMOTE { NULL } }; static pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); pcmk__add_main_args(context, entries); return context; } int main(int argc, char **argv, char **envp) { int rc = pcmk_rc_ok; crm_exit_t exit_code = CRM_EX_OK; const char *option = NULL; pcmk__output_t *out = NULL; GError *error = NULL; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = NULL; gchar **processed_args = NULL; GOptionContext *context = NULL; #ifdef PCMK__COMPILE_REMOTE // If necessary, create PID 1 now before any file descriptors are opened remoted_spawn_pidone(argc, argv, envp); #endif args = pcmk__new_common_args(SUMMARY); #ifdef PCMK__COMPILE_REMOTE processed_args = pcmk__cmdline_preproc(argv, "lp"); #else processed_args = pcmk__cmdline_preproc(argv, "l"); #endif // PCMK__COMPILE_REMOTE context = build_arg_context(args, &output_group); crm_log_preinit(EXECD_NAME, argc, argv); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } if (args->version) { out->version(out, false); goto done; } // Open additional log files if (options.log_files != NULL) { for (gchar **fname = options.log_files; *fname != NULL; fname++) { rc = pcmk__add_logfile(*fname); if (rc != pcmk_rc_ok) { out->err(out, "Logging to %s is disabled: %s", *fname, pcmk_rc_str(rc)); } } } pcmk__cli_init_logging(EXECD_NAME, args->verbosity); crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); // ocf_log() (in resource-agents) uses the capitalized env options below option = pcmk__env_option(PCMK__ENV_LOGFACILITY); if (!pcmk__str_eq(option, PCMK_VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches) && !pcmk__str_eq(option, "/dev/null", pcmk__str_none)) { pcmk__set_env_option("LOGFACILITY", option, true); } option = pcmk__env_option(PCMK__ENV_LOGFILE); if (!pcmk__str_eq(option, PCMK_VALUE_NONE, pcmk__str_casei|pcmk__str_null_matches)) { pcmk__set_env_option("LOGFILE", option, true); if (pcmk__env_option_enabled(crm_system_name, PCMK__ENV_DEBUG)) { pcmk__set_env_option("DEBUGLOG", option, true); } } #ifdef PCMK__COMPILE_REMOTE if (options.port != NULL) { pcmk__set_env_option(PCMK__ENV_REMOTE_PORT, options.port, false); } #endif // PCMK__COMPILE_REMOTE 
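/* As an illustration of the option propagation above (not part of this
 * change): with the compat flag set to true, pcmk__set_env_option() is
 * expected to export both the PCMK_-prefixed and the legacy capitalized
 * variables, so an agent using ocf_log() would see, e.g. (hypothetical
 * values):
 *
 *   HA_LOGFACILITY=daemon
 *   HA_LOGFILE=/var/log/pacemaker/pacemaker.log
 *   HA_DEBUGLOG=/var/log/pacemaker/pacemaker.log
 */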
start_time = time(NULL); crm_notice("Starting Pacemaker " EXECD_TYPE " executor"); /* The presence of this variable allegedly controls whether child * processes like httpd will try and use Systemd's sd_notify * API */ unsetenv("NOTIFY_SOCKET"); { // Temporary directory for resource agent use (leave owned by root) int rc = pcmk__build_path(PCMK__OCF_TMP_DIR, 0755); if (rc != pcmk_rc_ok) { crm_warn("Could not create resource agent temporary directory " PCMK__OCF_TMP_DIR ": %s", pcmk_rc_str(rc)); } } rsc_list = pcmk__strkey_table(NULL, free_rsc); ipcs = mainloop_add_ipc_server(CRM_SYSTEM_LRMD, QB_IPC_SHM, &lrmd_ipc_callbacks); if (ipcs == NULL) { crm_err("Failed to create IPC server: shutting down and inhibiting respawn"); exit_code = CRM_EX_FATAL; goto done; } #ifdef PCMK__COMPILE_REMOTE if (lrmd_init_remote_tls_server() < 0) { crm_err("Failed to create TLS listener: shutting down and staying down"); exit_code = CRM_EX_FATAL; goto done; } ipc_proxy_init(); #endif mainloop_add_signal(SIGTERM, lrmd_shutdown); mainloop = g_main_loop_new(NULL, FALSE); crm_notice("Pacemaker " EXECD_TYPE " executor successfully started and accepting connections"); crm_notice("OCF resource agent search path is %s", PCMK__OCF_RA_PATH); g_main_loop_run(mainloop); /* should never get here */ exit_executor(); done: g_strfreev(options.log_files); #ifdef PCMK__COMPILE_REMOTE g_free(options.port); #endif // PCMK__COMPILE_REMOTE g_strfreev(processed_args); pcmk__free_arg_context(context); pcmk__output_and_clear_error(&error, out); if (out != NULL) { out->finish(out, exit_code, true, NULL); pcmk__output_free(out); } pcmk__unregister_formats(); crm_exit(exit_code); } diff --git a/daemons/execd/remoted_proxy.c b/daemons/execd/remoted_proxy.c index cdae7ef1bd..ef421a32a0 100644 --- a/daemons/execd/remoted_proxy.c +++ b/daemons/execd/remoted_proxy.c @@ -1,508 +1,508 @@ /* * Copyright 2012-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include "pacemaker-execd.h" #include #include #include #include #include #include #include #include static qb_ipcs_service_t *cib_ro = NULL; static qb_ipcs_service_t *cib_rw = NULL; static qb_ipcs_service_t *cib_shm = NULL; static qb_ipcs_service_t *attrd_ipcs = NULL; static qb_ipcs_service_t *crmd_ipcs = NULL; static qb_ipcs_service_t *stonith_ipcs = NULL; static qb_ipcs_service_t *pacemakerd_ipcs = NULL; // An IPC provider is a cluster node controller connecting as a client static GList *ipc_providers = NULL; /* ipc clients == things like cibadmin, crm_resource, connecting locally * * @TODO This should be unnecessary (pcmk__foreach_ipc_client() should be * sufficient) */ static GHashTable *ipc_clients = NULL; /*! * \internal * \brief Get an IPC proxy provider * * \return Pointer to a provider if one exists, NULL otherwise * * \note Grab the first provider, which is the most recent connection. That way, * if we haven't yet timed out an old, failed connection, we don't try to * use it. */ pcmk__client_t * ipc_proxy_get_provider(void) { return ipc_providers? (pcmk__client_t *) (ipc_providers->data) : NULL; } /*! 
* \internal * \brief Accept a client connection on a proxy IPC server * * \param[in] c Client's IPC connection * \param[in] uid Client's user ID * \param[in] gid Client's group ID * \param[in] ipc_channel Name of IPC server to proxy * * \return pcmk_ok on success, -errno on error */ static int32_t ipc_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid, const char *ipc_channel) { pcmk__client_t *client; pcmk__client_t *ipc_proxy = ipc_proxy_get_provider(); xmlNode *msg; if (ipc_proxy == NULL) { crm_warn("Cannot proxy IPC connection from uid %d gid %d to %s " "because not connected to cluster", uid, gid, ipc_channel); return -EREMOTEIO; } /* This new client is a local IPC client on a Pacemaker Remote controlled * node, needing to access cluster node IPC services. */ client = pcmk__new_client(c, uid, gid); if (client == NULL) { return -ENOMEM; } /* This ipc client is bound to a single ipc provider. If the * provider goes away, this client is disconnected */ client->userdata = pcmk__str_copy(ipc_proxy->id); client->name = crm_strdup_printf("proxy-%s-%d-%.8s", ipc_channel, client->pid, client->id); /* Allow remote executor to distinguish between proxied local clients and * actual executor API clients */ pcmk__set_client_flags(client, pcmk__client_to_proxy); g_hash_table_insert(ipc_clients, client->id, client); msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_NEW); crm_xml_add(msg, PCMK__XA_LRMD_IPC_SERVER, ipc_channel); crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, client->id); lrmd_server_send_notify(ipc_proxy, msg); pcmk__xml_free(msg); crm_debug("Accepted IPC proxy connection (session ID %s) " "from uid %d gid %d on channel %s", client->id, uid, gid, ipc_channel); return 0; } static int32_t crmd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, CRM_SYSTEM_CRMD); } static int32_t attrd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, PCMK__VALUE_ATTRD); } static int32_t stonith_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, "stonith-ng"); } static int32_t pacemakerd_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return -EREMOTEIO; } static int32_t cib_proxy_accept_rw(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, PCMK__SERVER_BASED_RW); } static int32_t cib_proxy_accept_ro(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { return ipc_proxy_accept(c, uid, gid, PCMK__SERVER_BASED_RO); } void ipc_proxy_forward_client(pcmk__client_t *ipc_proxy, xmlNode *xml) { const char *session = crm_element_value(xml, PCMK__XA_LRMD_IPC_SESSION); const char *msg_type = crm_element_value(xml, PCMK__XA_LRMD_IPC_OP); xmlNode *wrapper = pcmk__xe_first_child(xml, PCMK__XE_LRMD_IPC_MSG, NULL, NULL); xmlNode *msg = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); pcmk__client_t *ipc_client; int rc = pcmk_rc_ok; if (pcmk__str_eq(msg_type, LRMD_IPC_OP_SHUTDOWN_ACK, pcmk__str_casei)) { handle_shutdown_ack(); return; } if (pcmk__str_eq(msg_type, LRMD_IPC_OP_SHUTDOWN_NACK, pcmk__str_casei)) { handle_shutdown_nack(); return; } ipc_client = pcmk__find_client_by_id(session); if (ipc_client == NULL) { xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, session); lrmd_server_send_notify(ipc_proxy, msg); pcmk__xml_free(msg); return; } /* This is an 
event or response from the ipc provider * going to the local ipc client. * * Looking at the chain of events. * * -----remote node----------------|---- cluster node ------ * ipc_client <--1--> this code * <--2--> pacemaker-controld:remote_proxy_cb/remote_proxy_relay_event() * <--3--> ipc server * * This function is receiving a msg from connection 2 * and forwarding it to connection 1. */ if (pcmk__str_eq(msg_type, LRMD_IPC_OP_EVENT, pcmk__str_casei)) { crm_trace("Sending event to %s", ipc_client->id); rc = pcmk__ipc_send_xml(ipc_client, 0, msg, crm_ipc_server_event); } else if (pcmk__str_eq(msg_type, LRMD_IPC_OP_RESPONSE, pcmk__str_casei)) { int msg_id = 0; crm_element_value_int(xml, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id); crm_trace("Sending response to %d - %s", ipc_client->request_id, ipc_client->id); rc = pcmk__ipc_send_xml(ipc_client, msg_id, msg, FALSE); CRM_LOG_ASSERT(msg_id == ipc_client->request_id); ipc_client->request_id = 0; } else if (pcmk__str_eq(msg_type, LRMD_IPC_OP_DESTROY, pcmk__str_casei)) { qb_ipcs_disconnect(ipc_client->ipcs); } else { crm_err("Unknown ipc proxy msg type %s" , msg_type); } if (rc != pcmk_rc_ok) { crm_warn("Could not proxy IPC to client %s: %s " QB_XS " rc=%d", ipc_client->id, pcmk_rc_str(rc), rc); } } static int32_t ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) { int rc = pcmk_rc_ok; uint32_t id = 0; uint32_t flags = 0; pcmk__client_t *client = pcmk__find_client(c); pcmk__client_t *ipc_proxy = pcmk__find_client_by_id(client->userdata); xmlNode *wrapper = NULL; xmlNode *request = NULL; xmlNode *msg = NULL; if (!ipc_proxy) { qb_ipcs_disconnect(client->ipcs); return 0; } /* This is a request from the local ipc client going * to the ipc provider. * * Looking at the chain of events. * * -----remote node----------------|---- cluster node ------ * ipc_client <--1--> this code * <--2--> pacemaker-controld:remote_proxy_dispatch_internal() * <--3--> ipc server * * This function is receiving a request from connection * 1 and forwarding it to connection 2. */ rc = pcmk__ipc_msg_append(&client->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. */ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. */ - request = pcmk__client_data2xml(client, client->buffer->data, &id, &flags); + request = pcmk__client_data2xml(client, &id, &flags); g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return. */ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (client->buffer != NULL) { g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } return 0; } if (!request) { return 0; } CRM_CHECK(client != NULL, crm_err("Invalid client"); pcmk__xml_free(request); return FALSE); CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p", client); pcmk__xml_free(request); return FALSE); /* This ensures that synced request/responses happen over the event channel * in the controller, allowing the controller to process the messages async. 
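 * (crm_ipc_proxied is OR'ed into the forwarded flags just below, and the
 * original request id travels in PCMK__XA_LRMD_IPC_MSG_ID so that
 * ipc_proxy_forward_client() can match the eventual response back to this
 * client's request_id.)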
*/ pcmk__set_ipc_flags(flags, pcmk__client_name(client), crm_ipc_proxied); client->request_id = id; msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_REQUEST); crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, client->id); crm_xml_add(msg, PCMK__XA_LRMD_IPC_CLIENT, pcmk__client_name(client)); crm_xml_add(msg, PCMK__XA_LRMD_IPC_USER, client->user); crm_xml_add_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, id); crm_xml_add_int(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, flags); wrapper = pcmk__xe_create(msg, PCMK__XE_LRMD_IPC_MSG); pcmk__xml_copy(wrapper, request); lrmd_server_send_notify(ipc_proxy, msg); pcmk__xml_free(request); pcmk__xml_free(msg); return 0; } /*! * \internal * \brief Notify a proxy provider that we wish to shut down * * \param[in,out] ipc_proxy IPC client connection to proxy provider * * \return 0 on success, -1 on error */ int ipc_proxy_shutdown_req(pcmk__client_t *ipc_proxy) { xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); int rc; crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_REQ); /* We don't really have a session, but the controller needs this attribute * to recognize this as proxy communication. */ crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, "0"); rc = (lrmd_server_send_notify(ipc_proxy, msg) != pcmk_rc_ok)? -1 : 0; pcmk__xml_free(msg); return rc; } static int32_t ipc_proxy_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); pcmk__client_t *ipc_proxy; if (client == NULL) { return 0; } ipc_proxy = pcmk__find_client_by_id(client->userdata); crm_trace("Connection %p", c); if (ipc_proxy) { xmlNode *msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_IPC_PROXY); crm_xml_add(msg, PCMK__XA_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); crm_xml_add(msg, PCMK__XA_LRMD_IPC_SESSION, client->id); lrmd_server_send_notify(ipc_proxy, msg); pcmk__xml_free(msg); } g_hash_table_remove(ipc_clients, client->id); free(client->userdata); client->userdata = NULL; pcmk__free_client(client); return 0; } static void ipc_proxy_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); ipc_proxy_closed(c); } static struct qb_ipcs_service_handlers crmd_proxy_callbacks = { .connection_accept = crmd_proxy_accept, .connection_created = NULL, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers attrd_proxy_callbacks = { .connection_accept = attrd_proxy_accept, .connection_created = NULL, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers stonith_proxy_callbacks = { .connection_accept = stonith_proxy_accept, .connection_created = NULL, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers pacemakerd_proxy_callbacks = { .connection_accept = pacemakerd_proxy_accept, .connection_created = NULL, .msg_process = NULL, .connection_closed = NULL, .connection_destroyed = NULL }; static struct qb_ipcs_service_handlers cib_proxy_callbacks_ro = { .connection_accept = cib_proxy_accept_ro, .connection_created = NULL, .msg_process = ipc_proxy_dispatch, .connection_closed = ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; static struct qb_ipcs_service_handlers cib_proxy_callbacks_rw = { .connection_accept = cib_proxy_accept_rw, .connection_created = NULL, .msg_process = ipc_proxy_dispatch, .connection_closed = 
ipc_proxy_closed, .connection_destroyed = ipc_proxy_destroy }; void ipc_proxy_add_provider(pcmk__client_t *ipc_proxy) { // Prepending ensures the most recent connection is always first ipc_providers = g_list_prepend(ipc_providers, ipc_proxy); } void ipc_proxy_remove_provider(pcmk__client_t *ipc_proxy) { GHashTableIter iter; pcmk__client_t *ipc_client = NULL; char *key = NULL; GList *remove_these = NULL; GList *gIter = NULL; ipc_providers = g_list_remove(ipc_providers, ipc_proxy); g_hash_table_iter_init(&iter, ipc_clients); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & ipc_client)) { const char *proxy_id = ipc_client->userdata; if (pcmk__str_eq(proxy_id, ipc_proxy->id, pcmk__str_casei)) { crm_info("ipc proxy connection for client %s pid %d destroyed because cluster node disconnected.", ipc_client->id, ipc_client->pid); /* we can't remove during the iteration, so copy items * to a list we can destroy later */ remove_these = g_list_append(remove_these, ipc_client); } } for (gIter = remove_these; gIter != NULL; gIter = gIter->next) { ipc_client = gIter->data; // Disconnection callback will free the client here qb_ipcs_disconnect(ipc_client->ipcs); } /* just frees the list, not the elements in the list */ g_list_free(remove_these); } void ipc_proxy_init(void) { ipc_clients = pcmk__strkey_table(NULL, NULL); pcmk__serve_based_ipc(&cib_ro, &cib_rw, &cib_shm, &cib_proxy_callbacks_ro, &cib_proxy_callbacks_rw); pcmk__serve_attrd_ipc(&attrd_ipcs, &attrd_proxy_callbacks); pcmk__serve_fenced_ipc(&stonith_ipcs, &stonith_proxy_callbacks); pcmk__serve_pacemakerd_ipc(&pacemakerd_ipcs, &pacemakerd_proxy_callbacks); crmd_ipcs = pcmk__serve_controld_ipc(&crmd_proxy_callbacks); if (crmd_ipcs == NULL) { crm_err("Failed to create controller: exiting and inhibiting respawn"); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled"); crm_exit(CRM_EX_FATAL); } } void ipc_proxy_cleanup(void) { if (ipc_providers) { g_list_free(ipc_providers); ipc_providers = NULL; } if (ipc_clients) { g_hash_table_destroy(ipc_clients); ipc_clients = NULL; } pcmk__stop_based_ipc(cib_ro, cib_rw, cib_shm); qb_ipcs_destroy(attrd_ipcs); qb_ipcs_destroy(stonith_ipcs); qb_ipcs_destroy(pacemakerd_ipcs); qb_ipcs_destroy(crmd_ipcs); cib_ro = NULL; cib_rw = NULL; cib_shm = NULL; } diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c index 4a3a700136..bea2ca64c2 100644 --- a/daemons/fenced/pacemaker-fenced.c +++ b/daemons/fenced/pacemaker-fenced.c @@ -1,697 +1,697 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include #include #include #include #include #include #include // PRIu32, PRIx32 #include #include #include #include #include #include #include #include #include #include #include #include #define SUMMARY "daemon for executing fencing devices in a Pacemaker cluster" // @TODO This should be guint long long stonith_watchdog_timeout_ms = 0; GList *stonith_watchdog_targets = NULL; static GMainLoop *mainloop = NULL; gboolean stonith_shutdown_flag = FALSE; static qb_ipcs_service_t *ipcs = NULL; static pcmk__output_t *out = NULL; pcmk__supported_format_t formats[] = { PCMK__SUPPORTED_FORMAT_NONE, PCMK__SUPPORTED_FORMAT_TEXT, PCMK__SUPPORTED_FORMAT_XML, { NULL, NULL, NULL } }; static struct { gboolean stand_alone; gchar **log_files; } options; crm_exit_t exit_code = CRM_EX_OK; static void stonith_cleanup(void); static int32_t st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { if (stonith_shutdown_flag) { crm_info("Ignoring new client [%d] during shutdown", pcmk__client_pid(c)); return -ECONNREFUSED; } if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } /* Exit code means? */ static int32_t st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { uint32_t id = 0; uint32_t flags = 0; uint32_t call_options = st_opt_none; xmlNode *request = NULL; pcmk__client_t *c = pcmk__find_client(qbc); const char *op = NULL; int rc = pcmk_rc_ok; if (c == NULL) { crm_info("Invalid client: %p", qbc); return 0; } rc = pcmk__ipc_msg_append(&c->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. */ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. */ - request = pcmk__client_data2xml(c, c->buffer->data, &id, &flags); + request = pcmk__client_data2xml(c, &id, &flags); g_byte_array_free(c->buffer, TRUE); c->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return. 
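 * (This branch covers reassembly failures only; a message that reassembles
 * but fails to parse as XML is instead NACKed below with CRM_EX_PROTOCOL.)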
*/ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (c->buffer != NULL) { g_byte_array_free(c->buffer, TRUE); c->buffer = NULL; } return 0; } if (request == NULL) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_NACK, NULL, CRM_EX_PROTOCOL); return 0; } op = crm_element_value(request, PCMK__XA_CRM_TASK); if(pcmk__str_eq(op, CRM_OP_RM_NODE_CACHE, pcmk__str_casei)) { crm_xml_add(request, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(request, PCMK__XA_ST_OP, op); crm_xml_add(request, PCMK__XA_ST_CLIENTID, c->id); crm_xml_add(request, PCMK__XA_ST_CLIENTNAME, pcmk__client_name(c)); crm_xml_add(request, PCMK__XA_ST_CLIENTNODE, fenced_get_local_node()); pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, request); pcmk__xml_free(request); return 0; } if (c->name == NULL) { const char *value = crm_element_value(request, PCMK__XA_ST_CLIENTNAME); c->name = crm_strdup_printf("%s.%u", pcmk__s(value, "unknown"), c->pid); } rc = pcmk__xe_get_flags(request, PCMK__XA_ST_CALLOPT, &call_options, st_opt_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from IPC request: %s", pcmk_rc_str(rc)); } crm_trace("Flags %#08" PRIx32 "/%#08x for command %" PRIu32 " from client %s", flags, call_options, id, pcmk__client_name(c)); if (pcmk_is_set(call_options, st_opt_sync_call)) { pcmk__assert(pcmk_is_set(flags, crm_ipc_client_response)); CRM_LOG_ASSERT(c->request_id == 0); /* This means the client has two synchronous events in-flight */ c->request_id = id; /* Reply only to the last one */ } crm_xml_add(request, PCMK__XA_ST_CLIENTID, c->id); crm_xml_add(request, PCMK__XA_ST_CLIENTNAME, pcmk__client_name(c)); crm_xml_add(request, PCMK__XA_ST_CLIENTNODE, fenced_get_local_node()); crm_log_xml_trace(request, "ipc-received"); stonith_command(c, id, flags, request, NULL); pcmk__xml_free(request); return 0; } /* Error code means? 
*/ static int32_t st_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p closed", c); pcmk__free_client(client); /* 0 means: yes, go ahead and destroy the connection */ return 0; } static void st_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p destroyed", c); st_ipc_closed(c); } static void stonith_peer_callback(xmlNode * msg, void *private_data) { const char *remote_peer = crm_element_value(msg, PCMK__XA_SRC); const char *op = crm_element_value(msg, PCMK__XA_ST_OP); if (pcmk__str_eq(op, STONITH_OP_POKE, pcmk__str_none)) { return; } crm_log_xml_trace(msg, "Peer[inbound]"); stonith_command(NULL, 0, 0, msg, remote_peer); } #if SUPPORT_COROSYNC static void handle_cpg_message(cpg_handle_t handle, const struct cpg_name *groupName, uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len) { xmlNode *xml = NULL; const char *from = NULL; char *data = pcmk__cpg_message_data(handle, nodeid, pid, msg, &from); if(data == NULL) { return; } xml = pcmk__xml_parse(data); if (xml == NULL) { crm_err("Invalid XML: '%.120s'", data); free(data); return; } crm_xml_add(xml, PCMK__XA_SRC, from); stonith_peer_callback(xml, NULL); pcmk__xml_free(xml); free(data); } static void stonith_peer_cs_destroy(gpointer user_data) { crm_crit("Lost connection to cluster layer, shutting down"); stonith_shutdown(0); } #endif void do_local_reply(const xmlNode *notify_src, pcmk__client_t *client, int call_options) { /* send callback to originating child */ int local_rc = pcmk_rc_ok; int rid = 0; uint32_t ipc_flags = crm_ipc_server_event; if (pcmk_is_set(call_options, st_opt_sync_call)) { CRM_LOG_ASSERT(client->request_id); rid = client->request_id; client->request_id = 0; ipc_flags = crm_ipc_flags_none; } local_rc = pcmk__ipc_send_xml(client, rid, notify_src, ipc_flags); if (local_rc == pcmk_rc_ok) { crm_trace("Sent response %d to client %s", rid, pcmk__client_name(client)); } else { crm_warn("%synchronous reply to client %s failed: %s", (pcmk_is_set(call_options, st_opt_sync_call)? "S" : "As"), pcmk__client_name(client), pcmk_rc_str(local_rc)); } } /*! 
* \internal * \brief Parse a fencer client notification type string to a flag * * \param[in] type Notification type string * * \return Flag corresponding to \p type, or \c fenced_nf_none if none exists */ enum fenced_notify_flags fenced_parse_notify_flag(const char *type) { if (pcmk__str_eq(type, PCMK__VALUE_ST_NOTIFY_FENCE, pcmk__str_none)) { return fenced_nf_fence_result; } if (pcmk__str_eq(type, STONITH_OP_DEVICE_ADD, pcmk__str_none)) { return fenced_nf_device_registered; } if (pcmk__str_eq(type, STONITH_OP_DEVICE_DEL, pcmk__str_none)) { return fenced_nf_device_removed; } if (pcmk__str_eq(type, PCMK__VALUE_ST_NOTIFY_HISTORY, pcmk__str_none)) { return fenced_nf_history_changed; } if (pcmk__str_eq(type, PCMK__VALUE_ST_NOTIFY_HISTORY_SYNCED, pcmk__str_none)) { return fenced_nf_history_synced; } return fenced_nf_none; } static void stonith_notify_client(gpointer key, gpointer value, gpointer user_data) { const xmlNode *update_msg = user_data; pcmk__client_t *client = value; const char *type = NULL; CRM_CHECK(client != NULL, return); CRM_CHECK(update_msg != NULL, return); type = crm_element_value(update_msg, PCMK__XA_SUBT); CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return); if (client->ipcs == NULL) { crm_trace("Skipping client with NULL channel"); return; } if (pcmk_is_set(client->flags, fenced_parse_notify_flag(type))) { int rc = pcmk__ipc_send_xml(client, 0, update_msg, crm_ipc_server_event); if (rc != pcmk_rc_ok) { crm_warn("%s notification of client %s failed: %s " QB_XS " id=%.8s rc=%d", type, pcmk__client_name(client), pcmk_rc_str(rc), client->id, rc); } else { crm_trace("Sent %s notification to client %s", type, pcmk__client_name(client)); } } } void do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout) { pcmk__client_t *client = NULL; xmlNode *notify_data = NULL; if (!timeout || !call_id || !client_id) { return; } client = pcmk__find_client_by_id(client_id); if (!client) { return; } notify_data = pcmk__xe_create(NULL, PCMK__XE_ST_ASYNC_TIMEOUT_VALUE); crm_xml_add(notify_data, PCMK__XA_T, PCMK__VALUE_ST_ASYNC_TIMEOUT_VALUE); crm_xml_add(notify_data, PCMK__XA_ST_CALLID, call_id); crm_xml_add_int(notify_data, PCMK__XA_ST_TIMEOUT, timeout); crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id); if (client) { pcmk__ipc_send_xml(client, 0, notify_data, crm_ipc_server_event); } pcmk__xml_free(notify_data); } /*! * \internal * \brief Notify relevant IPC clients of a fencing operation result * * \param[in] type Notification type * \param[in] result Result of fencing operation (assume success if NULL) * \param[in] data If not NULL, add to notification as call data */ void fenced_send_notification(const char *type, const pcmk__action_result_t *result, xmlNode *data) { /* TODO: Standardize the contents of data */ xmlNode *update_msg = pcmk__xe_create(NULL, PCMK__XE_NOTIFY); CRM_LOG_ASSERT(type != NULL); crm_xml_add(update_msg, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); crm_xml_add(update_msg, PCMK__XA_SUBT, type); crm_xml_add(update_msg, PCMK__XA_ST_OP, type); stonith__xe_set_result(update_msg, result); if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(update_msg, PCMK__XE_ST_CALLDATA); pcmk__xml_copy(wrapper, data); } crm_trace("Notifying clients"); pcmk__foreach_ipc_client(stonith_notify_client, update_msg); pcmk__xml_free(update_msg); crm_trace("Notify complete"); } /*! 
* \internal * \brief Send notifications for a configuration change to subscribed clients * * \param[in] op Notification type (\c STONITH_OP_DEVICE_ADD or * \c STONITH_OP_DEVICE_DEL) * \param[in] result Operation result * \param[in] desc Description of what changed (either device ID or string * representation of level * ([])) */ void fenced_send_config_notification(const char *op, const pcmk__action_result_t *result, const char *desc) { xmlNode *notify_data = pcmk__xe_create(NULL, op); crm_xml_add(notify_data, PCMK__XA_ST_DEVICE_ID, desc); fenced_send_notification(op, result, notify_data); pcmk__xml_free(notify_data); } /*! * \internal * \brief Check whether a node does watchdog-fencing * * \param[in] node Name of node to check * * \return TRUE if node found in stonith_watchdog_targets * or stonith_watchdog_targets is empty indicating * all nodes are doing watchdog-fencing */ gboolean node_does_watchdog_fencing(const char *node) { return ((stonith_watchdog_targets == NULL) || pcmk__str_in_list(node, stonith_watchdog_targets, pcmk__str_casei)); } void stonith_shutdown(int nsig) { crm_info("Terminating with %d clients", pcmk__ipc_client_count()); stonith_shutdown_flag = TRUE; if (mainloop != NULL && g_main_loop_is_running(mainloop)) { g_main_loop_quit(mainloop); } } static void stonith_cleanup(void) { fenced_cib_cleanup(); if (ipcs) { qb_ipcs_destroy(ipcs); } pcmk__cluster_destroy_node_caches(); pcmk__client_cleanup(); free_stonith_remote_op_list(); free_topology_list(); fenced_free_device_table(); free_metadata_cache(); fenced_unregister_handlers(); } struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = st_ipc_accept, .connection_created = NULL, .msg_process = st_ipc_dispatch, .connection_closed = st_ipc_closed, .connection_destroyed = st_ipc_destroy }; /*! * \internal * \brief Callback for peer status changes * * \param[in] type What changed * \param[in] node What peer had the change * \param[in] data Previous value of what changed */ static void st_peer_update_callback(enum pcmk__node_update type, pcmk__node_status_t *node, const void *data) { if ((type != pcmk__node_update_processes) && !pcmk_is_set(node->flags, pcmk__node_status_remote)) { /* * This is a hack until we can send to a nodeid and/or we fix node name lookups * These messages are ignored in stonith_peer_callback() */ xmlNode *query = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); crm_xml_add(query, PCMK__XA_T, PCMK__VALUE_STONITH_NG); crm_xml_add(query, PCMK__XA_ST_OP, STONITH_OP_POKE); crm_debug("Broadcasting our uname because of node %" PRIu32, node->cluster_layer_id); pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, query); pcmk__xml_free(query); } } /* @COMPAT Deprecated since 2.1.8. Use pcmk_list_fence_attrs() or * crm_resource --list-options=fencing instead of querying daemon metadata. 
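 * (Usage example: "crm_resource --list-options=fencing" prints the same
 * option list without querying the daemon.)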
* * NOTE: pcs (as of at least 0.11.8) uses this */ static int fencer_metadata(void) { const char *name = PCMK__SERVER_FENCED; const char *desc_short = N_("Instance attributes available for all " "\"stonith\"-class resources"); const char *desc_long = N_("Instance attributes available for all " "\"stonith\"-class resources and used by " "Pacemaker's fence daemon"); return pcmk__daemon_metadata(out, name, desc_short, desc_long, pcmk__opt_fencing); } static GOptionEntry entries[] = { { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &options.stand_alone, N_("Intended for use in regression testing only"), NULL }, { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY, &options.log_files, N_("Send logs to the additional named logfile"), NULL }, { NULL } }; static GOptionContext * build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) { GOptionContext *context = NULL; context = pcmk__build_arg_context(args, "text (default), xml", group, NULL); pcmk__add_main_args(context, entries); return context; } int main(int argc, char **argv) { int rc = pcmk_rc_ok; pcmk_cluster_t *cluster = NULL; crm_ipc_t *old_instance = NULL; GError *error = NULL; GOptionGroup *output_group = NULL; pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY); gchar **processed_args = pcmk__cmdline_preproc(argv, "l"); GOptionContext *context = build_arg_context(args, &output_group); crm_log_preinit(NULL, argc, argv); pcmk__register_formats(output_group, formats); if (!g_option_context_parse_strv(context, &processed_args, &error)) { exit_code = CRM_EX_USAGE; goto done; } rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_ERROR; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error creating output format %s: %s", args->output_ty, pcmk_rc_str(rc)); goto done; } if (args->version) { out->version(out, false); goto done; } if ((g_strv_length(processed_args) >= 2) && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) { rc = fencer_metadata(); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_FATAL; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Unable to display metadata: %s", pcmk_rc_str(rc)); } goto done; } // Open additional log files pcmk__add_logfiles(options.log_files, out); crm_log_init(NULL, LOG_INFO + args->verbosity, TRUE, (args->verbosity > 0), argc, argv, FALSE); crm_notice("Starting Pacemaker fencer"); old_instance = crm_ipc_new("stonith-ng", 0); if (old_instance == NULL) { /* crm_ipc_new() will have already logged an error message with * crm_err() */ exit_code = CRM_EX_FATAL; goto done; } if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) { // IPC endpoint already up crm_ipc_close(old_instance); crm_ipc_destroy(old_instance); crm_crit("Aborting start-up because another fencer instance is " "already active"); goto done; } else { // Not up or not authentic, we'll proceed either way crm_ipc_destroy(old_instance); old_instance = NULL; } mainloop_add_signal(SIGTERM, stonith_shutdown); pcmk__cluster_init_node_caches(); rc = fenced_scheduler_init(); if (rc != pcmk_rc_ok) { exit_code = CRM_EX_FATAL; g_set_error(&error, PCMK__EXITC_ERROR, exit_code, "Error initializing scheduler data: %s", pcmk_rc_str(rc)); goto done; } cluster = pcmk_cluster_new(); #if SUPPORT_COROSYNC if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { pcmk_cluster_set_destroy_fn(cluster, stonith_peer_cs_destroy); pcmk_cpg_set_deliver_fn(cluster, handle_cpg_message); pcmk_cpg_set_confchg_fn(cluster, pcmk__cpg_confchg_cb); } #endif // 
SUPPORT_COROSYNC pcmk__cluster_set_status_callback(&st_peer_update_callback); if (pcmk_cluster_connect(cluster) != pcmk_rc_ok) { exit_code = CRM_EX_FATAL; crm_crit("Cannot sign in to the cluster... terminating"); goto done; } fenced_set_local_node(cluster->priv->node_name); if (!options.stand_alone) { setup_cib(); } fenced_init_device_table(); init_topology_list(); pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks); // Create the mainloop and run it... mainloop = g_main_loop_new(NULL, FALSE); crm_notice("Pacemaker fencer successfully started and accepting connections"); g_main_loop_run(mainloop); done: g_strfreev(processed_args); pcmk__free_arg_context(context); g_strfreev(options.log_files); stonith_cleanup(); pcmk_cluster_free(cluster); fenced_scheduler_cleanup(); pcmk__output_and_clear_error(&error, out); if (out != NULL) { out->finish(out, exit_code, true, NULL); pcmk__output_free(out); } pcmk__unregister_formats(); crm_exit(exit_code); } diff --git a/daemons/pacemakerd/pcmkd_messages.c b/daemons/pacemakerd/pcmkd_messages.c index 0dd9a773c6..1f22720abb 100644 --- a/daemons/pacemakerd/pcmkd_messages.c +++ b/daemons/pacemakerd/pcmkd_messages.c @@ -1,309 +1,309 @@ /* * Copyright 2010-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include "pacemakerd.h" #include #include #include #include #include #include #include #include static GHashTable *pcmkd_handlers = NULL; static xmlNode * handle_node_cache_request(pcmk__request_t *request) { crm_trace("Ignoring request from client %s to purge node " "because peer cache is not used", pcmk__client_name(request->ipc_client)); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_OK); return NULL; } static xmlNode * handle_ping_request(pcmk__request_t *request) { xmlNode *msg = request->xml; const char *value = NULL; xmlNode *ping = NULL; xmlNode *reply = NULL; const char *from = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM); /* Pinged for status */ crm_trace("Pinged from " PCMK__XA_CRM_SYS_FROM "='%s' " PCMK_XA_ORIGIN "='%s'", pcmk__s(from, ""), pcmk__s(crm_element_value(msg, PCMK_XA_ORIGIN), "")); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); ping = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE); value = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); crm_xml_add(ping, PCMK__XA_CRM_SUBSYSTEM, value); crm_xml_add(ping, PCMK__XA_PACEMAKERD_STATE, pacemakerd_state); crm_xml_add_ll(ping, PCMK_XA_CRM_TIMESTAMP, (long long) subdaemon_check_progress); crm_xml_add(ping, PCMK_XA_RESULT, "ok"); reply = pcmk__new_reply(msg, ping); pcmk__xml_free(ping); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Failed building ping reply for client %s", pcmk__client_name(request->ipc_client)); } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } /* Just advance the start-up state machine when sbd pings us */ if (from && strstr(from, "sbd")) { if (pcmk__str_eq(pacemakerd_state, PCMK__VALUE_SHUTDOWN_COMPLETE, pcmk__str_none)) { if (pcmk__get_sbd_sync_resource_startup()) { crm_notice("Shutdown-complete-state passed to SBD."); } shutdown_complete_state_reported_to = request->ipc_client->pid; } else if (pcmk__str_eq(pacemakerd_state, PCMK__VALUE_WAIT_FOR_PING, pcmk__str_none)) { crm_notice("Received startup-trigger
from SBD."); pacemakerd_state = PCMK__VALUE_STARTING_DAEMONS; mainloop_set_trigger(startup_trigger); } } return reply; } static xmlNode * handle_shutdown_request(pcmk__request_t *request) { xmlNode *msg = request->xml; xmlNode *shutdown = NULL; xmlNode *reply = NULL; /* Only allow privileged users (i.e. root or hacluster) to shut down * Pacemaker from the command line (or direct IPC), so that other users * are forced to go through the CIB and have ACLs applied. */ bool allowed = pcmk_is_set(request->ipc_client->flags, pcmk__client_privileged); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); shutdown = pcmk__xe_create(NULL, PCMK__XE_SHUTDOWN); if (allowed) { crm_notice("Shutting down in response to IPC request %s from %s", crm_element_value(msg, PCMK_XA_REFERENCE), crm_element_value(msg, PCMK_XA_ORIGIN)); crm_xml_add_int(shutdown, PCMK__XA_OP_STATUS, CRM_EX_OK); } else { crm_warn("Ignoring shutdown request from unprivileged client %s", pcmk__client_name(request->ipc_client)); crm_xml_add_int(shutdown, PCMK__XA_OP_STATUS, CRM_EX_INSUFFICIENT_PRIV); } reply = pcmk__new_reply(msg, shutdown); pcmk__xml_free(shutdown); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Failed building shutdown reply for client %s", pcmk__client_name(request->ipc_client)); } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } if (allowed) { pcmk_shutdown(15); } return reply; } static xmlNode * handle_unknown_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INVALID_PARAM); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", pcmk__client_name(request->ipc_client)); return NULL; } static void pcmkd_register_handlers(void) { pcmk__server_command_t handlers[] = { { CRM_OP_RM_NODE_CACHE, handle_node_cache_request }, { CRM_OP_PING, handle_ping_request }, { CRM_OP_QUIT, handle_shutdown_request }, { NULL, handle_unknown_request }, }; pcmkd_handlers = pcmk__register_handlers(handlers); } static int32_t pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } /* Error code means? */ static int32_t pcmk_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); if (shutdown_complete_state_reported_to == client->pid) { shutdown_complete_state_reported_client_closed = TRUE; if (shutdown_trigger) { mainloop_set_trigger(shutdown_trigger); } } pcmk__free_client(client); return 0; } static void pcmk_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); pcmk_ipc_closed(c); } /* Exit code means? */ static int32_t pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { int rc = pcmk_rc_ok; uint32_t id = 0; uint32_t flags = 0; xmlNode *msg = NULL; pcmk__client_t *c = pcmk__find_client(qbc); CRM_CHECK(c != NULL, return 0); if (pcmkd_handlers == NULL) { pcmkd_register_handlers(); } rc = pcmk__ipc_msg_append(&c->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. */ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. 
*/ - msg = pcmk__client_data2xml(c, c->buffer->data, &id, &flags); + msg = pcmk__client_data2xml(c, &id, &flags); g_byte_array_free(c->buffer, TRUE); c->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return. */ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (c->buffer != NULL) { g_byte_array_free(c->buffer, TRUE); c->buffer = NULL; } return 0; } if (msg == NULL) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } else { char *log_msg = NULL; const char *reason = NULL; xmlNode *reply = NULL; pcmk__request_t request = { .ipc_client = c, .ipc_id = id, .ipc_flags = flags, .peer = NULL, .xml = msg, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; request.op = crm_element_value_copy(request.xml, PCMK__XA_CRM_TASK); CRM_CHECK(request.op != NULL, return 0); reply = pcmk__process_request(&request, pcmkd_handlers); if (reply != NULL) { pcmk__ipc_send_xml(c, id, reply, crm_ipc_server_event); pcmk__xml_free(reply); } reason = request.result.exit_reason; log_msg = crm_strdup_printf("Processed %s request from %s %s: %s%s%s%s", request.op, pcmk__request_origin_type(&request), pcmk__request_origin(&request), pcmk_exec_status_str(request.result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); if (!pcmk__result_ok(&request.result)) { crm_warn("%s", log_msg); } else { crm_debug("%s", log_msg); } free(log_msg); pcmk__reset_request(&request); } pcmk__xml_free(msg); return 0; } struct qb_ipcs_service_handlers pacemakerd_ipc_callbacks = { .connection_accept = pcmk_ipc_accept, .connection_created = NULL, .msg_process = pcmk_ipc_dispatch, .connection_closed = pcmk_ipc_closed, .connection_destroyed = pcmk_ipc_destroy }; diff --git a/daemons/schedulerd/schedulerd_messages.c b/daemons/schedulerd/schedulerd_messages.c index fd8a9221ac..1862b59f7f 100644 --- a/daemons/schedulerd/schedulerd_messages.c +++ b/daemons/schedulerd/schedulerd_messages.c @@ -1,362 +1,362 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include "pacemaker-schedulerd.h" static GHashTable *schedulerd_handlers = NULL; static pcmk_scheduler_t * init_scheduler(void) { pcmk_scheduler_t *scheduler = pcmk_new_scheduler(); pcmk__mem_assert(scheduler); scheduler->priv->out = logger_out; return scheduler; } static xmlNode * handle_pecalc_request(pcmk__request_t *request) { static struct series_s { const char *name; const char *param; /* Maximum number of inputs of this kind to save to disk. * If -1, save all; if 0, save none. 
*/ int wrap; } series[] = { { "pe-error", PCMK_OPT_PE_ERROR_SERIES_MAX, -1 }, { "pe-warn", PCMK_OPT_PE_WARN_SERIES_MAX, 5000 }, { "pe-input", PCMK_OPT_PE_INPUT_SERIES_MAX, 4000 }, }; xmlNode *msg = request->xml; xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL); xmlNode *xml_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); static char *last_digest = NULL; static char *filename = NULL; unsigned int seq = 0U; int series_id = 0; int series_wrap = 0; char *digest = NULL; const char *value = NULL; time_t execution_date = time(NULL); xmlNode *converted = NULL; xmlNode *reply = NULL; bool is_repoke = false; bool process = true; pcmk_scheduler_t *scheduler = init_scheduler(); pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); digest = pcmk__digest_xml(xml_data, false); converted = pcmk__xml_copy(NULL, xml_data); if (pcmk__update_configured_schema(&converted, true) != pcmk_rc_ok) { scheduler->priv->graph = pcmk__xe_create(NULL, PCMK__XE_TRANSITION_GRAPH); crm_xml_add_int(scheduler->priv->graph, "transition_id", 0); crm_xml_add_int(scheduler->priv->graph, PCMK_OPT_CLUSTER_DELAY, 0); process = false; free(digest); } else if (pcmk__str_eq(digest, last_digest, pcmk__str_casei)) { is_repoke = true; free(digest); } else { free(last_digest); last_digest = digest; } if (process) { scheduler->input = converted; pcmk__set_scheduler_flags(scheduler, pcmk__sched_no_counts |pcmk__sched_show_utilization); pcmk__schedule_actions(scheduler); // Don't free converted as part of scheduler scheduler->input = NULL; } // Get appropriate index into series[] array if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_error) || pcmk__config_has_error) { series_id = 0; } else if (pcmk_is_set(scheduler->flags, pcmk__sched_processing_warning) || pcmk__config_has_warning) { series_id = 1; } else { series_id = 2; } value = pcmk__cluster_option(scheduler->priv->options, series[series_id].param); if ((value == NULL) || (pcmk__scan_min_int(value, &series_wrap, -1) != pcmk_rc_ok)) { series_wrap = series[series_id].wrap; } if (pcmk__read_series_sequence(PCMK_SCHEDULER_INPUT_DIR, series[series_id].name, &seq) != pcmk_rc_ok) { // @TODO maybe handle errors better ... 
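// For now, fall back to restarting the series numbering at zero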
seq = 0U; } crm_trace("Series %s: wrap=%d, seq=%u, pref=%s", series[series_id].name, series_wrap, seq, value); reply = pcmk__new_reply(msg, scheduler->priv->graph); if (reply == NULL) { pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "Failed building scheduler reply for client %s", pcmk__client_name(request->ipc_client)); goto done; } if (series_wrap == 0) { // Don't save any inputs of this kind free(filename); filename = NULL; } else if (!is_repoke) { // Input changed, save to disk free(filename); filename = pcmk__series_filename(PCMK_SCHEDULER_INPUT_DIR, series[series_id].name, seq, true); } crm_xml_add(reply, PCMK__XA_CRM_TGRAPH_IN, filename); pcmk__log_transition_summary(scheduler, filename); if (series_wrap == 0) { crm_debug("Not saving input to disk (disabled by configuration)"); } else if (is_repoke) { crm_info("Input has not changed since last time, not saving to disk"); } else { unlink(filename); crm_xml_add_ll(xml_data, PCMK_XA_EXECUTION_DATE, (long long) execution_date); pcmk__xml_write_file(xml_data, filename, true); pcmk__write_series_sequence(PCMK_SCHEDULER_INPUT_DIR, series[series_id].name, ++seq, series_wrap); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); done: pcmk__xml_free(converted); pcmk_free_scheduler(scheduler); return reply; } static xmlNode * handle_unknown_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INVALID_PARAM); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", request->op); return NULL; } static xmlNode * handle_hello_request(pcmk__request_t *request) { pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); crm_trace("Received IPC hello from %s", pcmk__client_name(request->ipc_client)); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } static void schedulerd_register_handlers(void) { pcmk__server_command_t handlers[] = { { CRM_OP_HELLO, handle_hello_request }, { CRM_OP_PECALC, handle_pecalc_request }, { NULL, handle_unknown_request }, }; schedulerd_handlers = pcmk__register_handlers(handlers); } static int32_t pe_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid) { crm_trace("Connection %p", c); if (pcmk__new_client(c, uid, gid) == NULL) { return -ENOMEM; } return 0; } static int32_t pe_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size) { int rc = pcmk_rc_ok; uint32_t id = 0; uint32_t flags = 0; xmlNode *msg = NULL; pcmk__client_t *c = pcmk__find_client(qbc); const char *sys_to = NULL; CRM_CHECK(c != NULL, return 0); if (schedulerd_handlers == NULL) { schedulerd_register_handlers(); } rc = pcmk__ipc_msg_append(&c->buffer, data); if (rc == pcmk_rc_ipc_more) { /* We haven't read the complete message yet, so just return. */ return 0; } else if (rc == pcmk_rc_ok) { /* We've read the complete message and there's already a header on * the front. Pass it off for processing. */ - msg = pcmk__client_data2xml(c, c->buffer->data, &id, &flags); + msg = pcmk__client_data2xml(c, &id, &flags); g_byte_array_free(c->buffer, TRUE); c->buffer = NULL; } else { /* Some sort of error occurred reassembling the message. All we can * do is clean up, log an error and return.
*/ crm_err("Error when reading IPC message: %s", pcmk_rc_str(rc)); if (c->buffer != NULL) { g_byte_array_free(c->buffer, TRUE); c->buffer = NULL; } return 0; } if (msg == NULL) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_PROTOCOL); return 0; } sys_to = crm_element_value(msg, PCMK__XA_CRM_SYS_TO); if (pcmk__str_eq(crm_element_value(msg, PCMK__XA_SUBT), PCMK__VALUE_RESPONSE, pcmk__str_none)) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); crm_info("Ignoring IPC reply from %s", pcmk__client_name(c)); } else if (!pcmk__str_eq(sys_to, CRM_SYSTEM_PENGINE, pcmk__str_none)) { pcmk__ipc_send_ack(c, id, flags, PCMK__XE_ACK, NULL, CRM_EX_INDETERMINATE); crm_info("Ignoring invalid IPC message: to '%s' not " CRM_SYSTEM_PENGINE, pcmk__s(sys_to, "")); } else { char *log_msg = NULL; const char *reason = NULL; xmlNode *reply = NULL; pcmk__request_t request = { .ipc_client = c, .ipc_id = id, .ipc_flags = flags, .peer = NULL, .xml = msg, .call_options = 0, .result = PCMK__UNKNOWN_RESULT, }; request.op = crm_element_value_copy(request.xml, PCMK__XA_CRM_TASK); CRM_CHECK(request.op != NULL, return 0); reply = pcmk__process_request(&request, schedulerd_handlers); if (reply != NULL) { pcmk__ipc_send_xml(c, id, reply, crm_ipc_server_event); pcmk__xml_free(reply); } reason = request.result.exit_reason; log_msg = crm_strdup_printf("Processed %s request from %s %s: %s%s%s%s", request.op, pcmk__request_origin_type(&request), pcmk__request_origin(&request), pcmk_exec_status_str(request.result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); if (!pcmk__result_ok(&request.result)) { crm_warn("%s", log_msg); } else { crm_debug("%s", log_msg); } free(log_msg); pcmk__reset_request(&request); } pcmk__xml_free(msg); return 0; } /* Error code means? */ static int32_t pe_ipc_closed(qb_ipcs_connection_t * c) { pcmk__client_t *client = pcmk__find_client(c); if (client == NULL) { return 0; } crm_trace("Connection %p", c); pcmk__free_client(client); return 0; } static void pe_ipc_destroy(qb_ipcs_connection_t * c) { crm_trace("Connection %p", c); pe_ipc_closed(c); } struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = pe_ipc_accept, .connection_created = NULL, .msg_process = pe_ipc_dispatch, .connection_closed = pe_ipc_closed, .connection_destroyed = pe_ipc_destroy }; diff --git a/include/crm/common/ipc_internal.h b/include/crm/common/ipc_internal.h index a98ee1938a..134ae3cc36 100644 --- a/include/crm/common/ipc_internal.h +++ b/include/crm/common/ipc_internal.h @@ -1,293 +1,292 @@ /* * Copyright 2013-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_IPC_INTERNAL__H #define PCMK__CRM_COMMON_IPC_INTERNAL__H #include // bool #include // uint32_t, uint64_t, UINT64_C() #include // struct iovec #include // uid_t, gid_t, pid_t, size_t #include // gnutls_session_t #include // guint, gpointer, GQueue, ... #include // xmlNode #include // qb_ipcs_connection_t, ... 
#include // HAVE_GETPEEREID #include // crm_system_name #include #include // pcmk_controld_api_reply #include // pcmk_pacemakerd_{api_reply,state} #include // mainloop_io_t #ifdef __cplusplus extern "C" { #endif /* denotes "non yieldable PID" on FreeBSD, or actual PID1 in scenarios that require a delicate handling anyway (socket-based activation with systemd); we can be reasonably sure that this PID is never possessed by the actual child daemon, as it gets taken either by the proper init, or by pacemakerd itself (i.e. this precludes anything else); note that value of zero is meant to carry "unset" meaning, and better not to bet on/conditionalize over signedness of pid_t */ #define PCMK__SPECIAL_PID 1 // Timeout (in seconds) to use for IPC client sends, reply waits, etc. #define PCMK__IPC_TIMEOUT 120 #if defined(HAVE_GETPEEREID) /* on FreeBSD, we don't want to expose "non-yieldable PID" (leading to "IPC liveness check only") as its nominal representation, which could cause confusion -- this is unambiguous as long as there's no socket-based activation like with systemd (very improbable) */ #define PCMK__SPECIAL_PID_AS_0(p) (((p) == PCMK__SPECIAL_PID) ? 0 : (p)) #else #define PCMK__SPECIAL_PID_AS_0(p) (p) #endif /*! * \internal * \brief Check the authenticity and liveness of a process via its IPC endpoint * * When an IPC daemon is detected at the given IPC endpoint (name), its * authenticity is verified by comparing its credentials against the provided * referential UID and GID; the outcome of this check can be deduced from the * return value. As an exception, a referential UID of 0 (root) matches any * detected daemon's credentials. * * \param[in] name IPC name to base the search on * \param[in] refuid referential UID to check against * \param[in] refgid referential GID to check against * \param[out] gotpid to optionally store obtained PID of the found process * (not available on FreeBSD; the special value of 1, * see PCMK__SPECIAL_PID, is used instead, and the caller * is required to special-case this value) * * \return Standard Pacemaker return code * * \note Return codes of particular interest include pcmk_rc_ipc_unresponsive * indicating that no trace of IPC liveness was detected, and * pcmk_rc_ipc_unauthorized indicating that the IPC endpoint is blocked by * an unauthorized process. * \note This function emits a log message for return codes other than * pcmk_rc_ok and pcmk_rc_ipc_unresponsive, and when there isn't a perfect * match with respect to \p refuid and/or \p refgid, for a possible * least-privilege-principle violation. * * \see crm_ipc_is_authentic_process */ int pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, gid_t refgid, pid_t *gotpid); int pcmk__connect_generic_ipc(crm_ipc_t *ipc); int pcmk__ipc_fd(crm_ipc_t *ipc, int *fd); int pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type, int attempts); /* * Server-related */ typedef struct pcmk__client_s pcmk__client_t; struct pcmk__remote_s { /* Shared */ char *buffer; size_t buffer_size; size_t buffer_offset; int auth_timeout; int tcp_socket; mainloop_io_t *source; time_t uptime; char *start_state; /* CIB-only */ char *token; /* TLS only */ // Must be created by pcmk__new_tls_session() gnutls_session_t tls_session; }; enum pcmk__client_flags { // Lower 32 bits are reserved for server (not library) use // Next 8 bits are reserved for client type (sort of a cheap enum) //! Client uses plain IPC pcmk__client_ipc = (UINT64_C(1) << 32), //!
Client uses TCP connection pcmk__client_tcp = (UINT64_C(1) << 33), //! Client uses TCP with TLS pcmk__client_tls = (UINT64_C(1) << 34), // The rest are client attributes //! Client IPC is proxied pcmk__client_proxied = (UINT64_C(1) << 40), //! Client is run by root or cluster user pcmk__client_privileged = (UINT64_C(1) << 41), //! Local client to be proxied pcmk__client_to_proxy = (UINT64_C(1) << 42), /*! * \brief Client IPC connection accepted * * Used only for remote CIB connections via \c PCMK_XA_REMOTE_TLS_PORT. */ pcmk__client_authenticated = (UINT64_C(1) << 43), //! Client TLS handshake is complete pcmk__client_tls_handshake_complete = (UINT64_C(1) << 44), }; #define PCMK__CLIENT_TYPE(client) ((client)->flags & UINT64_C(0xff00000000)) struct pcmk__client_s { unsigned int pid; char *id; char *name; char *user; uint64_t flags; // Group of pcmk__client_flags int request_id; void *userdata; int event_timer; GQueue *event_queue; /* Buffer used to store a multipart IPC message when we are building it * up over multiple reads. */ GByteArray *buffer; /* Depending on the client type, only some of the following will be * populated/valid. @TODO Maybe convert to a union. */ qb_ipcs_connection_t *ipcs; /* IPC */ struct pcmk__remote_s *remote; /* TCP/TLS */ unsigned int queue_backlog; /* IPC queue length after last flush */ unsigned int queue_max; /* Evict client whose queue grows this big */ }; #define pcmk__set_client_flags(client, flags_to_set) do { \ (client)->flags = pcmk__set_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Client", pcmk__client_name(client), \ (client)->flags, (flags_to_set), #flags_to_set); \ } while (0) #define pcmk__clear_client_flags(client, flags_to_clear) do { \ (client)->flags = pcmk__clear_flags_as(__func__, __LINE__, \ LOG_TRACE, \ "Client", pcmk__client_name(client), \ (client)->flags, (flags_to_clear), #flags_to_clear); \ } while (0) #define pcmk__set_ipc_flags(ipc_flags, ipc_name, flags_to_set) do { \ ipc_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \ "IPC", (ipc_name), \ (ipc_flags), (flags_to_set), \ #flags_to_set); \ } while (0) #define pcmk__clear_ipc_flags(ipc_flags, ipc_name, flags_to_clear) do { \ ipc_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \ "IPC", (ipc_name), \ (ipc_flags), (flags_to_clear), \ #flags_to_clear); \ } while (0) guint pcmk__ipc_client_count(void); void pcmk__foreach_ipc_client(GHFunc func, gpointer user_data); void pcmk__client_cleanup(void); pcmk__client_t *pcmk__find_client(const qb_ipcs_connection_t *c); pcmk__client_t *pcmk__find_client_by_id(const char *id); const char *pcmk__client_name(const pcmk__client_t *c); const char *pcmk__client_type_str(uint64_t client_type); pcmk__client_t *pcmk__new_unauth_client(void *key); pcmk__client_t *pcmk__new_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid); void pcmk__free_client(pcmk__client_t *c); void pcmk__drop_all_clients(qb_ipcs_service_t *s); void pcmk__set_client_queue_max(pcmk__client_t *client, const char *qmax); xmlNode *pcmk__ipc_create_ack_as(const char *function, int line, uint32_t flags, const char *tag, const char *ver, crm_exit_t status); #define pcmk__ipc_create_ack(flags, tag, ver, st) \ pcmk__ipc_create_ack_as(__func__, __LINE__, (flags), (tag), (ver), (st)) int pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c, uint32_t request, uint32_t flags, const char *tag, const char *ver, crm_exit_t status); #define pcmk__ipc_send_ack(c, req, flags, tag, ver, st) \ pcmk__ipc_send_ack_as(__func__, __LINE__, (c), (req), (flags), (tag), 
(ver), (st)) int pcmk__ipc_prepare_iov(uint32_t request, const GString *message, uint16_t index, struct iovec **result, ssize_t *bytes); int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, const xmlNode *message, uint32_t flags); int pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags); void pcmk__ipc_free_client_buffer(crm_ipc_t *client); int pcmk__ipc_msg_append(GByteArray **buffer, guint8 *data); -xmlNode *pcmk__client_data2xml(pcmk__client_t *c, void *data, - uint32_t *id, uint32_t *flags); +xmlNode *pcmk__client_data2xml(pcmk__client_t *c, uint32_t *id, uint32_t *flags); int pcmk__client_pid(qb_ipcs_connection_t *c); void pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb); void pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb); void pcmk__serve_pacemakerd_ipc(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb); qb_ipcs_service_t *pcmk__serve_schedulerd_ipc(struct qb_ipcs_service_handlers *cb); qb_ipcs_service_t *pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb); void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro, qb_ipcs_service_t **ipcs_rw, qb_ipcs_service_t **ipcs_shm, struct qb_ipcs_service_handlers *ro_cb, struct qb_ipcs_service_handlers *rw_cb); void pcmk__stop_based_ipc(qb_ipcs_service_t *ipcs_ro, qb_ipcs_service_t *ipcs_rw, qb_ipcs_service_t *ipcs_shm); static inline const char * pcmk__ipc_sys_name(const char *ipc_name, const char *fallback) { return ipc_name ? ipc_name : ((crm_system_name ? crm_system_name : fallback)); } const char *pcmk__pcmkd_state_enum2friendly(enum pcmk_pacemakerd_state state); const char *pcmk__controld_api_reply2str(enum pcmk_controld_api_reply reply); const char *pcmk__pcmkd_api_reply2str(enum pcmk_pacemakerd_api_reply reply); #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_IPC_INTERNAL__H diff --git a/lib/common/ipc_server.c b/lib/common/ipc_server.c index 44567be1e2..25c788b883 100644 --- a/lib/common/ipc_server.c +++ b/lib/common/ipc_server.c @@ -1,1159 +1,1158 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include "crmcommon_private.h" /* Evict clients whose event queue grows this large (by default) */ #define PCMK_IPC_DEFAULT_QUEUE_MAX 500 static GHashTable *client_connections = NULL; /*! * \internal * \brief Count IPC clients * * \return Number of active IPC client connections */ guint pcmk__ipc_client_count(void) { return client_connections? g_hash_table_size(client_connections) : 0; } /*! * \internal * \brief Execute a function for each active IPC client connection * * \param[in] func Function to call * \param[in,out] user_data Pointer to pass to function * * \note The parameters are the same as for g_hash_table_foreach(). 
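 * \note Because the iteration uses g_hash_table_foreach(), \p func must not
 *       add or remove clients (that is, it must not modify the client table).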
*/ void pcmk__foreach_ipc_client(GHFunc func, gpointer user_data) { if ((func != NULL) && (client_connections != NULL)) { g_hash_table_foreach(client_connections, func, user_data); } } pcmk__client_t * pcmk__find_client(const qb_ipcs_connection_t *c) { if (client_connections) { return g_hash_table_lookup(client_connections, c); } crm_trace("No client found for %p", c); return NULL; } pcmk__client_t * pcmk__find_client_by_id(const char *id) { if ((client_connections != NULL) && (id != NULL)) { gpointer key; pcmk__client_t *client = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, client_connections); while (g_hash_table_iter_next(&iter, &key, (gpointer *) & client)) { if (strcmp(client->id, id) == 0) { return client; } } } crm_trace("No client found with id='%s'", pcmk__s(id, "")); return NULL; } /*! * \internal * \brief Get a client identifier for use in log messages * * \param[in] c Client * * \return Client's name, client's ID, or a string literal, as available * \note This is intended to be used in format strings like "client %s". */ const char * pcmk__client_name(const pcmk__client_t *c) { if (c == NULL) { return "(unspecified)"; } else if (c->name != NULL) { return c->name; } else if (c->id != NULL) { return c->id; } else { return "(unidentified)"; } } void pcmk__client_cleanup(void) { if (client_connections != NULL) { int active = g_hash_table_size(client_connections); if (active > 0) { crm_warn("Exiting with %d active IPC client%s", active, pcmk__plural_s(active)); } g_hash_table_destroy(client_connections); client_connections = NULL; } } void pcmk__drop_all_clients(qb_ipcs_service_t *service) { qb_ipcs_connection_t *c = NULL; if (service == NULL) { return; } c = qb_ipcs_connection_first_get(service); while (c != NULL) { qb_ipcs_connection_t *last = c; c = qb_ipcs_connection_next_get(service, last); /* There really shouldn't be anyone connected at this point */ crm_notice("Disconnecting client %p, pid=%d...", last, pcmk__client_pid(last)); qb_ipcs_disconnect(last); qb_ipcs_connection_unref(last); } } /*! * \internal * \brief Allocate a new pcmk__client_t object based on an IPC connection * * \param[in] c IPC connection (NULL to allocate generic client) * \param[in] key Connection table key (NULL to use sane default) * \param[in] uid_client UID corresponding to c (ignored if c is NULL) * * \return Pointer to new pcmk__client_t (guaranteed not to be \c NULL) */ static pcmk__client_t * client_from_connection(qb_ipcs_connection_t *c, void *key, uid_t uid_client) { pcmk__client_t *client = pcmk__assert_alloc(1, sizeof(pcmk__client_t)); if (c) { client->user = pcmk__uid2username(uid_client); if (client->user == NULL) { client->user = pcmk__str_copy("#unprivileged"); crm_err("Unable to enforce ACLs for user ID %d, assuming unprivileged", uid_client); } client->ipcs = c; pcmk__set_client_flags(client, pcmk__client_ipc); client->pid = pcmk__client_pid(c); if (key == NULL) { key = c; } } client->id = crm_generate_uuid(); if (key == NULL) { key = client->id; } if (client_connections == NULL) { crm_trace("Creating IPC client table"); client_connections = g_hash_table_new(g_direct_hash, g_direct_equal); } g_hash_table_insert(client_connections, key, client); return client; } /*! 
 * \brief Allocate a new pcmk__client_t object and generate its ID * * \param[in] key What to use as connections hash table key (NULL to use ID) * * \return Pointer to new pcmk__client_t (asserts on failure) */ pcmk__client_t * pcmk__new_unauth_client(void *key) { return client_from_connection(NULL, key, 0); } pcmk__client_t * pcmk__new_client(qb_ipcs_connection_t *c, uid_t uid_client, gid_t gid_client) { uid_t uid_cluster = 0; gid_t gid_cluster = 0; pcmk__client_t *client = NULL; CRM_CHECK(c != NULL, return NULL); if (pcmk_daemon_user(&uid_cluster, &gid_cluster) < 0) { static bool need_log = TRUE; if (need_log) { crm_warn("Could not find user and group IDs for user %s", CRM_DAEMON_USER); need_log = FALSE; } } if (uid_client != 0) { crm_trace("Giving group %u access to new IPC connection", gid_cluster); /* Passing -1 to chown(2) means don't change */ qb_ipcs_connection_auth_set(c, -1, gid_cluster, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); } /* TODO: Do our own auth checking, return NULL if unauthorized */ client = client_from_connection(c, NULL, uid_client); if ((uid_client == 0) || (uid_client == uid_cluster)) { /* Remember when a connection came from root or hacluster */ pcmk__set_client_flags(client, pcmk__client_privileged); } crm_debug("New IPC client %s for PID %u with uid %d and gid %d", client->id, client->pid, uid_client, gid_client); return client; } static struct iovec * pcmk__new_ipc_event(void) { return (struct iovec *) pcmk__assert_alloc(2, sizeof(struct iovec)); } /*! * \brief Free an I/O vector created by pcmk__ipc_prepare_iov() * * \param[in,out] event I/O vector to free */ void pcmk_free_ipc_event(struct iovec *event) { if (event != NULL) { free(event[0].iov_base); free(event[1].iov_base); free(event); } } static void free_event(gpointer data) { pcmk_free_ipc_event((struct iovec *) data); } static void add_event(pcmk__client_t *c, struct iovec *iov) { if (c->event_queue == NULL) { c->event_queue = g_queue_new(); } g_queue_push_tail(c->event_queue, iov); } void pcmk__free_client(pcmk__client_t *c) { if (c == NULL) { return; } if (client_connections) { if (c->ipcs) { crm_trace("Destroying %p/%p (%d remaining)", c, c->ipcs, g_hash_table_size(client_connections) - 1); g_hash_table_remove(client_connections, c->ipcs); } else { crm_trace("Destroying remote connection %p (%d remaining)", c, g_hash_table_size(client_connections) - 1); g_hash_table_remove(client_connections, c->id); } } if (c->event_timer) { g_source_remove(c->event_timer); } if (c->event_queue) { crm_debug("Destroying %d events", g_queue_get_length(c->event_queue)); g_queue_free_full(c->event_queue, free_event); } free(c->id); free(c->name); free(c->user); if (c->buffer != NULL) { g_byte_array_free(c->buffer, TRUE); c->buffer = NULL; } if (c->remote) { if (c->remote->auth_timeout) { g_source_remove(c->remote->auth_timeout); } if (c->remote->tls_session != NULL) { /* @TODO Reduce duplication at callers. Put here everything * necessary to tear down and free tls_session. */ gnutls_deinit(c->remote->tls_session); } free(c->remote->buffer); free(c->remote); } free(c); } /*!
* \internal * \brief Raise IPC eviction threshold for a client, if allowed * * \param[in,out] client Client to modify * \param[in] qmax New threshold */ void pcmk__set_client_queue_max(pcmk__client_t *client, const char *qmax) { int rc = pcmk_rc_ok; long long qmax_ll = 0LL; unsigned int orig_value = 0U; CRM_CHECK(client != NULL, return); orig_value = client->queue_max; if (pcmk_is_set(client->flags, pcmk__client_privileged)) { rc = pcmk__scan_ll(qmax, &qmax_ll, 0LL); if (rc == pcmk_rc_ok) { if ((qmax_ll <= 0LL) || (qmax_ll > UINT_MAX)) { rc = ERANGE; } else { client->queue_max = (unsigned int) qmax_ll; } } } else { rc = EACCES; } if (rc != pcmk_rc_ok) { crm_info("Could not set IPC threshold for client %s[%u] to %s: %s", pcmk__client_name(client), client->pid, pcmk__s(qmax, "default"), pcmk_rc_str(rc)); } else if (client->queue_max != orig_value) { crm_debug("IPC threshold for client %s[%u] is now %u (was %u)", pcmk__client_name(client), client->pid, client->queue_max, orig_value); } } int pcmk__client_pid(qb_ipcs_connection_t *c) { struct qb_ipcs_connection_stats stats; stats.client_pid = 0; qb_ipcs_connection_stats_get(c, &stats, 0); return stats.client_pid; } /*! * \internal * \brief Retrieve message XML from data read from client IPC * * \param[in,out] c IPC client connection - * \param[in] data Data read from client connection * \param[out] id Where to store message ID from libqb header * \param[out] flags Where to store flags from libqb header * * \return Message XML on success, NULL otherwise */ xmlNode * -pcmk__client_data2xml(pcmk__client_t *c, void *data, uint32_t *id, - uint32_t *flags) +pcmk__client_data2xml(pcmk__client_t *c, uint32_t *id, uint32_t *flags) { xmlNode *xml = NULL; - char *text = ((char *)data) + sizeof(pcmk__ipc_header_t); - pcmk__ipc_header_t *header = data; + pcmk__ipc_header_t *header = (void *) c->buffer->data; + char *text = (char *) header + sizeof(pcmk__ipc_header_t); if (!pcmk__valid_ipc_header(header)) { return NULL; } if (id) { - *id = ((struct qb_ipc_response_header *)data)->id; + *id = header->qb.id; } + if (flags) { *flags = header->flags; } if (pcmk_is_set(header->flags, crm_ipc_proxied)) { /* Mark this client as being the endpoint of a proxy connection. * Proxy connections responses are sent on the event channel, to avoid * blocking the controller serving as proxy. */ pcmk__set_client_flags(c, pcmk__client_proxied); } pcmk__assert(text[header->size - 1] == 0); xml = pcmk__xml_parse(text); crm_log_xml_trace(xml, "[IPC received]"); return xml; } static int crm_ipcs_flush_events(pcmk__client_t *c); static gboolean crm_ipcs_flush_events_cb(gpointer data) { pcmk__client_t *c = data; c->event_timer = 0; crm_ipcs_flush_events(c); return FALSE; } /*! * \internal * \brief Add progressive delay before next event queue flush * * \param[in,out] c Client connection to add delay to * \param[in] queue_len Current event queue length */ static inline void delay_next_flush(pcmk__client_t *c, unsigned int queue_len) { /* Delay a maximum of 1.5 seconds */ guint delay = (queue_len < 5)? (1000 + 100 * queue_len) : 1500; c->event_timer = pcmk__create_timer(delay, crm_ipcs_flush_events_cb, c); } /*! 
* \internal * \brief Send client any messages in its queue * * \param[in,out] c Client to flush * * \return Standard Pacemaker return value */ static int crm_ipcs_flush_events(pcmk__client_t *c) { int rc = pcmk_rc_ok; ssize_t qb_rc = 0; unsigned int sent = 0; unsigned int queue_len = 0; if (c == NULL) { return rc; } else if (c->event_timer) { /* There is already a timer, wait until it goes off */ crm_trace("Timer active for %p - %d", c->ipcs, c->event_timer); return rc; } if (c->event_queue) { queue_len = g_queue_get_length(c->event_queue); } while (sent < 100) { pcmk__ipc_header_t *header = NULL; struct iovec *event = NULL; if (c->event_queue) { // We don't pop unless send is successful event = g_queue_peek_head(c->event_queue); } if (event == NULL) { // Queue is empty break; } /* Retry sending the event up to five times. If we get -EAGAIN, sleep * a very short amount of time (too long here is bad) and try again. * If we simply exit the while loop on -EAGAIN, we'll have to wait until * the timer fires off again (up to 1.5 seconds - see delay_next_flush) * to retry sending the message. * * In that case, the queue may just continue to grow faster than we are * processing it, eventually leading to daemons timing out waiting for * replies, which will cause wider failures. */ for (unsigned int retries = 5; retries > 0; retries--) { qb_rc = qb_ipcs_event_sendv(c->ipcs, event, 2); if (qb_rc < 0) { if (retries == 1 || qb_rc != -EAGAIN) { rc = (int) -qb_rc; goto no_more_retries; } else { pcmk__sleep_ms(5); } } else { break; } } event = g_queue_pop_head(c->event_queue); sent++; header = event[0].iov_base; crm_trace("Event %" PRId32 " to %p[%u] (%zd bytes) sent: %.120s", header->qb.id, c->ipcs, c->pid, qb_rc, (char *) (event[1].iov_base)); pcmk_free_ipc_event(event); } no_more_retries: queue_len -= sent; if (sent > 0 || queue_len) { crm_trace("Sent %u events (%u remaining) for %p[%d]: %s (%zd)", sent, queue_len, c->ipcs, c->pid, pcmk_rc_str(rc), qb_rc); } if (queue_len) { /* Allow clients to briefly fall behind on processing incoming messages, * but drop completely unresponsive clients so the connection doesn't * consume resources indefinitely. */ if (queue_len > QB_MAX(c->queue_max, PCMK_IPC_DEFAULT_QUEUE_MAX)) { if ((c->queue_backlog <= 1) || (queue_len < c->queue_backlog)) { /* Don't evict for a new or shrinking backlog */ crm_warn("Client with process ID %u has a backlog of %u messages " QB_XS " %p", c->pid, queue_len, c->ipcs); } else { crm_err("Evicting client with process ID %u due to backlog of %u messages " QB_XS " %p", c->pid, queue_len, c->ipcs); c->queue_backlog = 0; qb_ipcs_disconnect(c->ipcs); return rc; } } c->queue_backlog = queue_len; delay_next_flush(c, queue_len); } else { /* Event queue is empty, there is no backlog */ c->queue_backlog = 0; } return rc; } /*! * \internal * \brief Create an I/O vector for sending an IPC XML message * * If the message is too large to fit into a single buffer, this function will * prepare an I/O vector that only holds as much as fits. The remainder can be * prepared in a separate call by keeping a running count of the number of times * this function has been called and passing that in for \p index. 
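 *
 * Callers are expected to invoke this in a loop, incrementing \p index on
 * each call, until the return code is no longer pcmk_rc_ipc_more;
 * pcmk__ipc_send_xml() below follows exactly this pattern.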
* * \param[in] request Identifier for libqb response header * \param[in] message Message to send * \param[in] index How many times this function has been called - basically, * a count of how many chunks of \p message have already * been sent * \param[out] result Where to store prepared I/O vector - NULL on error * \param[out] bytes Size of prepared data in bytes (includes header) * * \return Standard Pacemaker return code */ int pcmk__ipc_prepare_iov(uint32_t request, const GString *message, uint16_t index, struct iovec **result, ssize_t *bytes) { struct iovec *iov = NULL; unsigned int payload_size = 0; unsigned int total = 0; unsigned int max_send_size = crm_ipc_default_buffer_size(); unsigned int max_chunk_size = 0; size_t offset = 0; pcmk__ipc_header_t *header = NULL; int rc = pcmk_rc_ok; if ((message == NULL) || (result == NULL)) { rc = EINVAL; goto done; } header = calloc(1, sizeof(pcmk__ipc_header_t)); if (header == NULL) { rc = ENOMEM; goto done; } *result = NULL; iov = pcmk__new_ipc_event(); iov[0].iov_len = sizeof(pcmk__ipc_header_t); iov[0].iov_base = header; header->version = PCMK__IPC_VERSION; /* We are passed an index, which is basically how many times this function * has been called. This is how we support multi-part IPC messages. We * need to convert that into an offset into the buffer that we want to start * reading from. * * Each call to this function can send max_send_size, but this also includes * the header and a null terminator character for the end of the payload. * We need to subtract those out here. */ max_chunk_size = max_send_size - iov[0].iov_len - 1; offset = index * max_chunk_size; /* How much of message is left to send? This does not include the null * terminator character. */ payload_size = message->len - offset; /* How much would be transmitted, including the header size and null * terminator character for the buffer? */ total = iov[0].iov_len + payload_size + 1; if (total >= max_send_size) { /* The entire packet is too big to fit in a single buffer. Calculate * how much of it we can send - buffer size, minus header size, minus * one for the null terminator. */ payload_size = max_chunk_size; header->size = payload_size + 1; iov[1].iov_base = strndup(message->str + offset, payload_size); if (iov[1].iov_base == NULL) { rc = ENOMEM; goto done; } iov[1].iov_len = header->size; rc = pcmk_rc_ipc_more; } else { /* The entire packet fits in a single buffer. We can copy the entirety * of it into the payload. */ header->size = payload_size + 1; iov[1].iov_base = pcmk__str_copy(message->str + offset); iov[1].iov_len = header->size; } header->part_id = index; header->qb.size = iov[0].iov_len + iov[1].iov_len; header->qb.id = (int32_t)request; /* Replying to a specific request */ if ((rc == pcmk_rc_ok) && (index != 0)) { pcmk__set_ipc_flags(header->flags, "multipart ipc", crm_ipc_multipart | crm_ipc_multipart_end); } else if (rc == pcmk_rc_ipc_more) { pcmk__set_ipc_flags(header->flags, "multipart ipc", crm_ipc_multipart); } *result = iov; pcmk__assert(header->qb.size > 0); if (bytes != NULL) { *bytes = header->qb.size; } done: if ((rc != pcmk_rc_ok) && (rc != pcmk_rc_ipc_more)) { pcmk_free_ipc_event(iov); } return rc; } /* Return the next available ID for a server event. * * For the parts of a multipart event, all parts should have the same ID as * the first part. 
*/ static uint32_t id_for_server_event(pcmk__ipc_header_t *header) { static uint32_t id = 1; if (pcmk_is_set(header->flags, crm_ipc_multipart) && (header->part_id != 0)) { return id; } else { id++; return id; } } int pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags) { int rc = pcmk_rc_ok; pcmk__ipc_header_t *header = iov[0].iov_base; /* _ALL_ replies to proxied connections need to be sent as events */ if (pcmk_is_set(c->flags, pcmk__client_proxied) && !pcmk_is_set(flags, crm_ipc_server_event)) { /* The proxied flag lets us know this was originally meant to be a * response, even though we're sending it over the event channel. */ pcmk__set_ipc_flags(flags, "server event", crm_ipc_server_event|crm_ipc_proxied_relay_response); } pcmk__set_ipc_flags(header->flags, "server event", flags); if (pcmk_is_set(flags, crm_ipc_server_event)) { /* Server events don't use an ID, though we do set one in * pcmk__ipc_prepare_iov if the event is in response to a particular * request. In that case, we don't want to set a new ID here that * overwrites that one. * * @TODO: Since server event IDs aren't used anywhere, do we really * need to set this for any reason other than ease of logging? */ if (header->qb.id == 0) { header->qb.id = id_for_server_event(header); } if (pcmk_is_set(flags, crm_ipc_server_free)) { crm_trace("Sending the original to %p[%d]", c->ipcs, c->pid); add_event(c, iov); } else { struct iovec *iov_copy = pcmk__new_ipc_event(); crm_trace("Sending a copy to %p[%d]", c->ipcs, c->pid); iov_copy[0].iov_len = iov[0].iov_len; iov_copy[0].iov_base = malloc(iov[0].iov_len); memcpy(iov_copy[0].iov_base, iov[0].iov_base, iov[0].iov_len); iov_copy[1].iov_len = iov[1].iov_len; iov_copy[1].iov_base = malloc(iov[1].iov_len); memcpy(iov_copy[1].iov_base, iov[1].iov_base, iov[1].iov_len); add_event(c, iov_copy); } rc = crm_ipcs_flush_events(c); } else { ssize_t qb_rc; char *part_text = NULL; CRM_LOG_ASSERT(header->qb.id != 0); /* Replying to a specific request */ if (pcmk_is_set(header->flags, crm_ipc_multipart_end)) { part_text = crm_strdup_printf(" (final part %d) ", header->part_id); } else if (pcmk_is_set(header->flags, crm_ipc_multipart)) { if (header->part_id == 0) { part_text = crm_strdup_printf(" (initial part %d) ", header->part_id); } else { part_text = crm_strdup_printf(" (part %d) ", header->part_id); } } else { part_text = crm_strdup_printf(" "); } qb_rc = qb_ipcs_response_sendv(c->ipcs, iov, 2); if (qb_rc < header->qb.size) { if (qb_rc < 0) { rc = (int) -qb_rc; } crm_notice("Response %" PRId32 "%sto pid %u failed: %s " QB_XS " bytes=%" PRId32 " rc=%zd ipcs=%p", header->qb.id, part_text, c->pid, pcmk_rc_str(rc), header->qb.size, qb_rc, c->ipcs); } else { crm_trace("Response %" PRId32 "%ssent, %zd bytes to %p[%u]", header->qb.id, part_text, qb_rc, c->ipcs, c->pid); crm_trace("Text = %s", (char *) iov[1].iov_base); } free(part_text); if (pcmk_is_set(flags, crm_ipc_server_free)) { pcmk_free_ipc_event(iov); } crm_ipcs_flush_events(c); } if ((rc == EPIPE) || (rc == ENOTCONN)) { crm_trace("Client %p disconnected", c->ipcs); } return rc; } int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, const xmlNode *message, uint32_t flags) { struct iovec *iov = NULL; int rc = pcmk_rc_ok; GString *iov_buffer = NULL; uint16_t index = 0; bool event_or_proxied = false; if (c == NULL) { return EINVAL; } iov_buffer = g_string_sized_new(1024); pcmk__xml_string(message, 0, iov_buffer, 0); /* Testing crm_ipc_server_event is obvious. pcmk__client_proxied is less * obvious. 
According to pcmk__ipc_send_iov, replies to proxied connections * need to be sent as events. However, do_local_notify (which calls this * function) will clear all flags so we can't go just by crm_ipc_server_event. * * Changing do_local_notify to check for a proxied connection first results * in processes on the Pacemaker Remote node (like cibadmin or crm_mon) * timing out when waiting for a reply. */ event_or_proxied = pcmk_is_set(flags, crm_ipc_server_event) || pcmk_is_set(c->flags, pcmk__client_proxied); do { rc = pcmk__ipc_prepare_iov(request, iov_buffer, index, &iov, NULL); switch (rc) { case pcmk_rc_ok: /* No more chunks to send after this one */ pcmk__set_ipc_flags(flags, "send data", crm_ipc_server_free); rc = pcmk__ipc_send_iov(c, iov, flags); if (event_or_proxied) { if (rc == EAGAIN) { /* Return pcmk_rc_ok instead so callers don't have to know * whether they passed an event or not when interpreting * the return code. */ rc = pcmk_rc_ok; } } else { /* EAGAIN is an error for IPC messages. We don't have a * send queue for these, so we need to try again. If there * was some other error, we need to break out of this loop * and report it. * * FIXME: Retry limit for EAGAIN? */ if (rc == EAGAIN) { break; } } goto done; case pcmk_rc_ipc_more: /* There are more chunks to send after this one */ pcmk__set_ipc_flags(flags, "send data", crm_ipc_server_free); rc = pcmk__ipc_send_iov(c, iov, flags); /* Did an error occur during transmission? */ if (event_or_proxied) { /* EAGAIN is not an error for server events. The event * will be queued for transmission and we will attempt * sending it again the next time pcmk__ipc_send_iov is * called, or when the crm_ipcs_flush_events_cb happens. */ if ((rc != pcmk_rc_ok) && (rc != EAGAIN)) { goto done; } index++; break; } else { /* EAGAIN is an error for IPC messages. We don't have a * send queue for these, so we need to try again. If there * was some other error, we need to break out of this loop * and report it. * * FIXME: Retry limit for EAGAIN? */ if (rc == pcmk_rc_ok) { index++; break; } else if (rc == EAGAIN) { break; } else { goto done; } } default: /* An error occurred during preparation */ goto done; } } while (true); done: if ((rc != pcmk_rc_ok) && (rc != EAGAIN)) { crm_notice("IPC message to pid %u failed: %s " QB_XS " rc=%d", c->pid, pcmk_rc_str(rc), rc); } g_string_free(iov_buffer, TRUE); return rc; } /*! * \internal * \brief Create an acknowledgement with a status code to send to a client * * \param[in] function Calling function * \param[in] line Source file line within calling function * \param[in] flags IPC flags to use when sending * \param[in] tag Element name to use for acknowledgement * \param[in] ver IPC protocol version (can be NULL) * \param[in] status Exit status code to add to ack * * \return Newly created XML for ack * * \note The caller is responsible for freeing the return value with * \c pcmk__xml_free(). */ xmlNode * pcmk__ipc_create_ack_as(const char *function, int line, uint32_t flags, const char *tag, const char *ver, crm_exit_t status) { xmlNode *ack = NULL; if (pcmk_is_set(flags, crm_ipc_client_response)) { ack = pcmk__xe_create(NULL, tag); crm_xml_add(ack, PCMK_XA_FUNCTION, function); crm_xml_add_int(ack, PCMK__XA_LINE, line); crm_xml_add_int(ack, PCMK_XA_STATUS, (int) status); crm_xml_add(ack, PCMK__XA_IPC_PROTO_VERSION, ver); } return ack; } /*! 
 * \internal * \brief Send an acknowledgement with a status code to a client * * \param[in] function Calling function * \param[in] line Source file line within calling function * \param[in] c Client to send ack to * \param[in] request Request ID being replied to * \param[in] flags IPC flags to use when sending * \param[in] tag Element name to use for acknowledgement * \param[in] ver IPC protocol version (can be NULL) * \param[in] status Status code to send with acknowledgement * * \return Standard Pacemaker return code */ int pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c, uint32_t request, uint32_t flags, const char *tag, const char *ver, crm_exit_t status) { int rc = pcmk_rc_ok; xmlNode *ack = pcmk__ipc_create_ack_as(function, line, flags, tag, ver, status); if (ack != NULL) { crm_trace("Ack'ing IPC message from client %s as <%s status=%d>", pcmk__client_name(c), tag, status); crm_log_xml_trace(ack, "sent-ack"); c->request_id = 0; rc = pcmk__ipc_send_xml(c, request, ack, flags); pcmk__xml_free(ack); } return rc; } /*! * \internal * \brief Add an IPC server to the main loop for the CIB manager API * * \param[out] ipcs_ro New IPC server for read-only CIB manager API * \param[out] ipcs_rw New IPC server for read/write CIB manager API * \param[out] ipcs_shm New IPC server for shared-memory CIB manager API * \param[in] ro_cb IPC callbacks for read-only API * \param[in] rw_cb IPC callbacks for read/write and shared-memory APIs * * \note This function exits fatally if unable to create the servers. * \note There is no actual difference between the three IPC endpoints other * than their names. */ void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro, qb_ipcs_service_t **ipcs_rw, qb_ipcs_service_t **ipcs_shm, struct qb_ipcs_service_handlers *ro_cb, struct qb_ipcs_service_handlers *rw_cb) { *ipcs_ro = mainloop_add_ipc_server(PCMK__SERVER_BASED_RO, QB_IPC_NATIVE, ro_cb); *ipcs_rw = mainloop_add_ipc_server(PCMK__SERVER_BASED_RW, QB_IPC_NATIVE, rw_cb); *ipcs_shm = mainloop_add_ipc_server(PCMK__SERVER_BASED_SHM, QB_IPC_SHM, rw_cb); if (*ipcs_ro == NULL || *ipcs_rw == NULL || *ipcs_shm == NULL) { crm_err("Failed to create the CIB manager: exiting and inhibiting respawn"); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled"); crm_exit(CRM_EX_FATAL); } } /*! * \internal * \brief Destroy IPC servers for the CIB manager API * * \param[out] ipcs_ro IPC server for the read-only CIB manager API * \param[out] ipcs_rw IPC server for the read/write CIB manager API * \param[out] ipcs_shm IPC server for the shared-memory CIB manager API * * \note This is a convenience function for calling qb_ipcs_destroy() for each * argument. */ void pcmk__stop_based_ipc(qb_ipcs_service_t *ipcs_ro, qb_ipcs_service_t *ipcs_rw, qb_ipcs_service_t *ipcs_shm) { qb_ipcs_destroy(ipcs_ro); qb_ipcs_destroy(ipcs_rw); qb_ipcs_destroy(ipcs_shm); } /*! * \internal * \brief Add an IPC server to the main loop for the controller API * * \param[in] cb IPC callbacks * * \return Newly created IPC server */ qb_ipcs_service_t * pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb) { return mainloop_add_ipc_server(CRM_SYSTEM_CRMD, QB_IPC_NATIVE, cb); } /*! * \internal * \brief Add an IPC server to the main loop for the attribute manager API * * \param[out] ipcs Where to store newly created IPC server * \param[in] cb IPC callbacks * * \note This function exits fatally if unable to create the server.
 */ void pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server(PCMK__VALUE_ATTRD, QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_crit("Exiting fatally because unable to serve " PCMK__SERVER_ATTRD " IPC (verify pacemaker and pacemaker_remote are not both " "enabled)"); crm_exit(CRM_EX_FATAL); } } /*! * \internal * \brief Add an IPC server to the main loop for the fencer API * * \param[out] ipcs Where to store newly created IPC server * \param[in] cb IPC callbacks * * \note This function exits fatally if unable to create the server. */ void pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server_with_prio("stonith-ng", QB_IPC_NATIVE, cb, QB_LOOP_HIGH); if (*ipcs == NULL) { crm_err("Failed to create fencer: exiting and inhibiting respawn."); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); crm_exit(CRM_EX_FATAL); } } /*! * \internal * \brief Add an IPC server to the main loop for the pacemakerd API * * \param[out] ipcs Where to store newly created IPC server * \param[in] cb IPC callbacks * * \note This function exits with CRM_EX_OSERR if unable to create the server. */ void pcmk__serve_pacemakerd_ipc(qb_ipcs_service_t **ipcs, struct qb_ipcs_service_handlers *cb) { *ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, cb); if (*ipcs == NULL) { crm_err("Couldn't start pacemakerd IPC server"); crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); /* Sub-daemons are observed by pacemakerd, so they exit CRM_EX_FATAL * when pacemakerd should not respawn them. For pacemakerd itself, keep * the exit code that init systems such as systemd saw before this code * was moved here from pacemakerd.c. */ crm_exit(CRM_EX_OSERR); } } /*! * \internal * \brief Add an IPC server to the main loop for the scheduler API * * \param[in] cb IPC callbacks * * \return Newly created IPC server (or NULL on failure) */ qb_ipcs_service_t * pcmk__serve_schedulerd_ipc(struct qb_ipcs_service_handlers *cb) { return mainloop_add_ipc_server(CRM_SYSTEM_PENGINE, QB_IPC_NATIVE, cb); }
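/* Illustrative sketch (not part of this patch): the dispatch pattern the
 * daemons above now follow with the updated pcmk__client_data2xml()
 * signature, which parses directly from the client's reassembly buffer.
 * "my_dispatch" is a hypothetical callback name; the calls mirror
 * pcmk_ipc_dispatch() and pe_ipc_dispatch().
 */
static int32_t
my_dispatch(qb_ipcs_connection_t *qbc, void *data, size_t size)
{
    uint32_t id = 0;
    uint32_t flags = 0;
    xmlNode *msg = NULL;
    pcmk__client_t *c = pcmk__find_client(qbc);

    CRM_CHECK(c != NULL, return 0);

    // Append this read to the client's buffer; a multipart message may
    // require several reads before it is complete
    switch (pcmk__ipc_msg_append(&c->buffer, data)) {
        case pcmk_rc_ipc_more:
            return 0;           // Message incomplete; wait for the next part

        case pcmk_rc_ok:
            // Message complete; parse it directly from c->buffer
            msg = pcmk__client_data2xml(c, &id, &flags);

            // Free the buffer only after parsing, since msg was read from it
            g_byte_array_free(c->buffer, TRUE);
            c->buffer = NULL;
            break;

        default:
            // Reassembly failed; discard any partial buffer and give up
            if (c->buffer != NULL) {
                g_byte_array_free(c->buffer, TRUE);
                c->buffer = NULL;
            }
            return 0;
    }

    // ... hand msg to a request handler, as the daemons above do ...
    pcmk__xml_free(msg);
    return 0;
}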