diff --git a/daemons/attrd/attrd_messages.c b/daemons/attrd/attrd_messages.c index 8c4b6803a1..6f70acbbdd 100644 --- a/daemons/attrd/attrd_messages.c +++ b/daemons/attrd/attrd_messages.c @@ -1,366 +1,366 @@ /* * Copyright 2022-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include // PRIu32 #include #include #include // pcmk__get_node() #include #include "pacemaker-attrd.h" int minimum_protocol_version = -1; static GHashTable *attrd_handlers = NULL; static bool is_sync_point_attr(xmlAttrPtr attr, void *data) { return pcmk__str_eq((const char *) attr->name, PCMK__XA_ATTR_SYNC_POINT, pcmk__str_none); } static int remove_sync_point_attribute(xmlNode *xml, void *data) { pcmk__xe_remove_matching_attrs(xml, false, is_sync_point_attr, NULL); pcmk__xe_foreach_child(xml, PCMK_XE_OP, remove_sync_point_attribute, NULL); return pcmk_rc_ok; } /* Sync points on a multi-update IPC message to an attrd too old to support * multi-update messages won't work. Strip the sync point attribute off here * so we don't pretend to support this situation and instead ACK the client * immediately. */ static void remove_unsupported_sync_points(pcmk__request_t *request) { if (request->xml->children != NULL && !ATTRD_SUPPORTS_MULTI_MESSAGE(minimum_protocol_version) && attrd_request_has_sync_point(request->xml)) { crm_warn("Ignoring sync point in request from %s because not all nodes support it", pcmk__request_origin(request)); remove_sync_point_attribute(request->xml, NULL); } } static xmlNode * handle_unknown_request(pcmk__request_t *request) { crm_err("Unknown IPC request %s from %s %s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request)); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown request type '%s' (bug?)", pcmk__s(request->op, "")); return NULL; } static xmlNode * handle_clear_failure_request(pcmk__request_t *request) { if (request->peer != NULL) { /* It is not currently possible to receive this as a peer command, * but will be, if we one day enable propagating this operation. */ attrd_peer_clear_failure(request); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { remove_unsupported_sync_points(request); if (attrd_request_has_sync_point(request->xml)) { /* If this client supplied a sync point it wants to wait for, add it to * the wait list. Clients on this list will not receive an ACK until * their sync point is hit which will result in the client stalled there * until it receives a response. * * All other clients will receive the expected response as normal. */ attrd_add_client_to_waitlist(request); } else { /* If the client doesn't want to wait for a sync point, go ahead and send * the ACK immediately. Otherwise, we'll send the ACK when the appropriate * sync point is reached. 
*/ attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags); } return attrd_client_clear_failure(request); } } static xmlNode * handle_confirm_request(pcmk__request_t *request) { if (request->peer != NULL) { int callid; crm_debug("Received confirmation from %s", request->peer); if (pcmk__xe_get_int(request->xml, PCMK__XA_CALL_ID, &callid) != pcmk_rc_ok) { pcmk__set_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Could not get callid from XML"); } else { attrd_handle_confirmation(callid, request->peer); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { return handle_unknown_request(request); } } static xmlNode * handle_query_request(pcmk__request_t *request) { if (request->peer != NULL) { return handle_unknown_request(request); } else { return attrd_client_query(request); } } static xmlNode * handle_remove_request(pcmk__request_t *request) { if (request->peer != NULL) { const char *host = pcmk__xe_get(request->xml, PCMK__XA_ATTR_HOST); bool reap = false; if (pcmk__xe_get_bool(request->xml, PCMK__XA_REAP, &reap) != pcmk_rc_ok) { reap = true; // Default to true for backward compatibility } attrd_peer_remove(host, reap, request->peer); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { return attrd_client_peer_remove(request); } } static xmlNode * handle_refresh_request(pcmk__request_t *request) { if (request->peer != NULL) { return handle_unknown_request(request); } else { return attrd_client_refresh(request); } } static xmlNode * handle_sync_response_request(pcmk__request_t *request) { if (request->ipc_client != NULL) { return handle_unknown_request(request); } else { if (request->peer != NULL) { pcmk__node_status_t *peer = pcmk__get_node(0, request->peer, NULL, pcmk__node_search_cluster_member); bool peer_won = attrd_check_for_new_writer(peer, request->xml); if (!pcmk__str_eq(peer->name, attrd_cluster->priv->node_name, pcmk__str_casei)) { attrd_peer_sync_response(peer, peer_won, request->xml); } } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } } static xmlNode * handle_update_request(pcmk__request_t *request) { if (request->peer != NULL) { const char *host = pcmk__xe_get(request->xml, PCMK__XA_ATTR_HOST); pcmk__node_status_t *peer = pcmk__get_node(0, request->peer, NULL, pcmk__node_search_cluster_member); attrd_peer_update(peer, request->xml, host, false); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } else { remove_unsupported_sync_points(request); if (attrd_request_has_sync_point(request->xml)) { /* If this client supplied a sync point it wants to wait for, add it to * the wait list. Clients on this list will not receive an ACK until * their sync point is hit which will result in the client stalled there * until it receives a response. * * All other clients will receive the expected response as normal. */ attrd_add_client_to_waitlist(request); } else { /* If the client doesn't want to wait for a sync point, go ahead and send * the ACK immediately. Otherwise, we'll send the ACK when the appropriate * sync point is reached. * * In the normal case, attrd_client_update can be called recursively which * makes where to send the ACK tricky. Doing it here ensures the client * only ever receives one. 
*/ attrd_send_ack(request->ipc_client, request->ipc_id, request->flags|crm_ipc_client_response); } return attrd_client_update(request); } } static void attrd_register_handlers(void) { pcmk__server_command_t handlers[] = { { PCMK__ATTRD_CMD_CLEAR_FAILURE, handle_clear_failure_request }, { PCMK__ATTRD_CMD_CONFIRM, handle_confirm_request }, { PCMK__ATTRD_CMD_PEER_REMOVE, handle_remove_request }, { PCMK__ATTRD_CMD_QUERY, handle_query_request }, { PCMK__ATTRD_CMD_REFRESH, handle_refresh_request }, { PCMK__ATTRD_CMD_SYNC_RESPONSE, handle_sync_response_request }, { PCMK__ATTRD_CMD_UPDATE, handle_update_request }, { PCMK__ATTRD_CMD_UPDATE_DELAY, handle_update_request }, { PCMK__ATTRD_CMD_UPDATE_BOTH, handle_update_request }, { NULL, handle_unknown_request }, }; attrd_handlers = pcmk__register_handlers(handlers); } void attrd_unregister_handlers(void) { if (attrd_handlers != NULL) { g_hash_table_destroy(attrd_handlers); attrd_handlers = NULL; } } void attrd_handle_request(pcmk__request_t *request) { xmlNode *reply = NULL; char *log_msg = NULL; const char *exec_status_s = NULL; const char *reason = NULL; if (attrd_handlers == NULL) { attrd_register_handlers(); } reply = pcmk__process_request(request, attrd_handlers); if (reply != NULL) { crm_log_xml_trace(reply, "Reply"); if (request->ipc_client != NULL) { pcmk__ipc_send_xml(request->ipc_client, request->ipc_id, reply, request->ipc_flags); } else { crm_err("Not sending CPG reply to client"); } pcmk__xml_free(reply); } exec_status_s = pcmk_exec_status_str(request->result.execution_status); reason = request->result.exit_reason; log_msg = pcmk__assert_asprintf("Processed %s request from %s %s: %s%s%s%s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request), exec_status_s, (reason == NULL)? "" : " (", pcmk__s(reason, ""), (reason == NULL)? "" : ")"); if (!pcmk__result_ok(&request->result)) { crm_warn("%s", log_msg); } else { crm_debug("%s", log_msg); } free(log_msg); pcmk__reset_request(request); } /*! 
 * \internal
 * \brief Send or broadcast private attribute for local node with protocol version
 */
void
attrd_send_protocol(const pcmk__node_status_t *peer)
{
    xmlNode *attrd_op = pcmk__xe_create(NULL, __func__);

    pcmk__xe_set(attrd_op, PCMK__XA_T, PCMK__VALUE_ATTRD);
    pcmk__xe_set(attrd_op, PCMK__XA_SRC, crm_system_name);
    pcmk__xe_set(attrd_op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE);
    pcmk__xe_set(attrd_op, PCMK__XA_ATTR_NAME, CRM_ATTR_PROTOCOL);
    pcmk__xe_set(attrd_op, PCMK__XA_ATTR_VALUE, ATTRD_PROTOCOL_VERSION);
    pcmk__xe_set_int(attrd_op, PCMK__XA_ATTR_IS_PRIVATE, 1);
    pcmk__xe_set(attrd_op, PCMK__XA_ATTR_HOST, attrd_cluster->priv->node_name);
    pcmk__xe_set(attrd_op, PCMK__XA_ATTR_HOST_ID,
                 attrd_cluster->priv->node_xml_id);

    if (peer == NULL) {
        crm_debug("Broadcasting attrd protocol version %s for node %s[%" PRIu32 "]",
                  ATTRD_PROTOCOL_VERSION,
                  pcmk__s(attrd_cluster->priv->node_name, "unknown"),
                  attrd_cluster->priv->node_id);
    } else {
        crm_debug("Sending attrd protocol version %s for node %s[%" PRIu32 "] to node %s[%" PRIu32 "]",
                  ATTRD_PROTOCOL_VERSION,
                  pcmk__s(attrd_cluster->priv->node_name, "unknown"),
                  attrd_cluster->priv->node_id,
                  pcmk__s(peer->name, "unknown"), peer->cluster_layer_id);
    }

    attrd_send_message(peer, attrd_op, false); /* ends up at attrd_peer_message() */
    pcmk__xml_free(attrd_op);
}

gboolean
attrd_send_message(const pcmk__node_status_t *node, xmlNode *data, bool confirm)
{
    const char *op = pcmk__xe_get(data, PCMK_XA_TASK);

    pcmk__xe_set(data, PCMK__XA_T, PCMK__VALUE_ATTRD);
    pcmk__xe_set(data, PCMK__XA_ATTR_VERSION, ATTRD_PROTOCOL_VERSION);

    /* Request a confirmation from the destination peer node (which could
     * be all if node is NULL) that the message has been received and
     * acted upon.
     */
    if (!pcmk__str_eq(op, PCMK__ATTRD_CMD_CONFIRM, pcmk__str_none)) {
-        pcmk__xe_set_bool_attr(data, PCMK__XA_CONFIRM, confirm);
+        pcmk__xe_set_bool(data, PCMK__XA_CONFIRM, confirm);
    }

    attrd_xml_add_writer(data);
    return pcmk__cluster_send_message(node, pcmk_ipc_attrd, data);
}
diff --git a/daemons/based/based_messages.c b/daemons/based/based_messages.c
index 0387dcdc35..2625795cc5 100644
--- a/daemons/based/based_messages.c
+++ b/daemons/based/based_messages.c
@@ -1,526 +1,526 @@
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Maximum number of diffs to ignore while waiting for a resync */ #define MAX_DIFF_RETRY 5 bool based_is_primary = false; xmlNode *the_cib = NULL; int cib_process_shutdown_req(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { const char *host = pcmk__xe_get(req, PCMK__XA_SRC); *answer = NULL; if (pcmk__xe_get(req, PCMK__XA_CIB_ISREPLYTO) == NULL) { crm_info("Peer %s is requesting to shut down", host); return pcmk_ok; } if (cib_shutdown_flag == FALSE) { crm_err("Peer %s mistakenly thinks we wanted to shut down", host); return -EINVAL; } crm_info("Exiting after %s acknowledged our shutdown request", host); terminate_cib(CRM_EX_OK); return pcmk_ok; } // @COMPAT: Remove when PCMK__CIB_REQUEST_NOOP is removed int cib_process_noop(const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { crm_trace("Processing \"%s\" event", op); *answer = NULL; return pcmk_ok; } int cib_process_readwrite(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int result = pcmk_ok; crm_trace("Processing \"%s\" event", op); // @COMPAT Pacemaker Remote clients <3.0.0 may send this if (pcmk__str_eq(op, PCMK__CIB_REQUEST_IS_PRIMARY, pcmk__str_none)) { if (based_is_primary) { result = pcmk_ok; } else { result = -EPERM; } return result; } if (pcmk__str_eq(op, PCMK__CIB_REQUEST_PRIMARY, pcmk__str_none)) { if (!based_is_primary) { crm_info("We are now in R/W mode"); based_is_primary = true; } else { crm_debug("We are still in R/W mode"); } } else if (based_is_primary) { crm_info("We are now in R/O mode"); based_is_primary = false; } return result; } /* Set to 1 when a sync is requested, incremented when a diff is ignored, * reset to 0 when a sync is received */ static int sync_in_progress = 0; void send_sync_request(const char *host) { xmlNode *sync_me = pcmk__xe_create(NULL, "sync-me"); pcmk__node_status_t *peer = NULL; crm_info("Requesting re-sync from %s", (host? 
host : "all peers")); sync_in_progress = 1; pcmk__xe_set(sync_me, PCMK__XA_T, PCMK__VALUE_CIB); pcmk__xe_set(sync_me, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_SYNC_TO_ONE); pcmk__xe_set(sync_me, PCMK__XA_CIB_DELEGATED_FROM, OUR_NODENAME); if (host != NULL) { peer = pcmk__get_node(0, host, NULL, pcmk__node_search_cluster_member); } pcmk__cluster_send_message(peer, pcmk_ipc_based, sync_me); pcmk__xml_free(sync_me); } int cib_process_ping(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { const char *host = pcmk__xe_get(req, PCMK__XA_SRC); const char *seq = pcmk__xe_get(req, PCMK__XA_CIB_PING_ID); char *digest = pcmk__digest_xml(the_cib, true); xmlNode *wrapper = NULL; crm_trace("Processing \"%s\" event %s from %s", op, seq, host); *answer = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE); pcmk__xe_set(*answer, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); pcmk__xe_set(*answer, PCMK_XA_DIGEST, digest); pcmk__xe_set(*answer, PCMK__XA_CIB_PING_ID, seq); wrapper = pcmk__xe_create(*answer, PCMK__XE_CIB_CALLDATA); if (the_cib != NULL) { pcmk__if_tracing( { /* Append additional detail so the receiver can log the * differences */ pcmk__xml_copy(wrapper, the_cib); }, { // Always include at least the version details const char *name = (const char *) the_cib->name; xmlNode *shallow = pcmk__xe_create(wrapper, name); pcmk__xe_copy_attrs(shallow, the_cib, pcmk__xaf_none); } ); } crm_info("Reporting our current digest to %s: %s for %s.%s.%s", host, digest, pcmk__xe_get(existing_cib, PCMK_XA_ADMIN_EPOCH), pcmk__xe_get(existing_cib, PCMK_XA_EPOCH), pcmk__xe_get(existing_cib, PCMK_XA_NUM_UPDATES)); free(digest); return pcmk_ok; } int cib_process_sync(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { return sync_our_cib(req, TRUE); } int cib_process_upgrade_server(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int rc = pcmk_ok; *answer = NULL; if (pcmk__xe_get(req, PCMK__XA_CIB_SCHEMA_MAX) != NULL) { /* The originator of an upgrade request sends it to the DC, without * PCMK__XA_CIB_SCHEMA_MAX. If an upgrade is needed, the DC * re-broadcasts the request with PCMK__XA_CIB_SCHEMA_MAX, and each node * performs the upgrade (and notifies its local clients) here. 
*/ return cib_process_upgrade( op, options, section, req, input, existing_cib, result_cib, answer); } else { xmlNode *scratch = pcmk__xml_copy(NULL, existing_cib); const char *host = pcmk__xe_get(req, PCMK__XA_SRC); const char *original_schema = NULL; const char *new_schema = NULL; const char *client_id = pcmk__xe_get(req, PCMK__XA_CIB_CLIENTID); const char *call_opts = pcmk__xe_get(req, PCMK__XA_CIB_CALLOPT); const char *call_id = pcmk__xe_get(req, PCMK__XA_CIB_CALLID); crm_trace("Processing \"%s\" event", op); original_schema = pcmk__xe_get(existing_cib, PCMK_XA_VALIDATE_WITH); if (original_schema == NULL) { crm_info("Rejecting upgrade request from %s: No " PCMK_XA_VALIDATE_WITH, host); return -pcmk_err_cib_corrupt; } rc = pcmk__update_schema(&scratch, NULL, true, true); rc = pcmk_rc2legacy(rc); new_schema = pcmk__xe_get(scratch, PCMK_XA_VALIDATE_WITH); if (pcmk__cmp_schemas_by_name(new_schema, original_schema) > 0) { xmlNode *up = pcmk__xe_create(NULL, __func__); rc = pcmk_ok; crm_notice("Upgrade request from %s verified", host); pcmk__xe_set(up, PCMK__XA_T, PCMK__VALUE_CIB); pcmk__xe_set(up, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_UPGRADE); pcmk__xe_set(up, PCMK__XA_CIB_SCHEMA_MAX, new_schema); pcmk__xe_set(up, PCMK__XA_CIB_DELEGATED_FROM, host); pcmk__xe_set(up, PCMK__XA_CIB_CLIENTID, client_id); pcmk__xe_set(up, PCMK__XA_CIB_CALLOPT, call_opts); pcmk__xe_set(up, PCMK__XA_CIB_CALLID, call_id); pcmk__cluster_send_message(NULL, pcmk_ipc_based, up); pcmk__xml_free(up); } else if(rc == pcmk_ok) { rc = -pcmk_err_schema_unchanged; } if (rc != pcmk_ok) { // Notify originating peer so it can notify its local clients pcmk__node_status_t *origin = NULL; origin = pcmk__search_node_caches(0, host, NULL, pcmk__node_search_cluster_member); crm_info("Rejecting upgrade request from %s: %s " QB_XS " rc=%d peer=%s", host, pcmk_strerror(rc), rc, (origin? origin->name : "lost")); if (origin) { xmlNode *up = pcmk__xe_create(NULL, __func__); pcmk__xe_set(up, PCMK__XA_T, PCMK__VALUE_CIB); pcmk__xe_set(up, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_UPGRADE); pcmk__xe_set(up, PCMK__XA_CIB_DELEGATED_FROM, host); pcmk__xe_set(up, PCMK__XA_CIB_ISREPLYTO, host); pcmk__xe_set(up, PCMK__XA_CIB_CLIENTID, client_id); pcmk__xe_set(up, PCMK__XA_CIB_CALLOPT, call_opts); pcmk__xe_set(up, PCMK__XA_CIB_CALLID, call_id); pcmk__xe_set_int(up, PCMK__XA_CIB_UPGRADE_RC, rc); if (!pcmk__cluster_send_message(origin, pcmk_ipc_based, up)) { crm_warn("Could not send CIB upgrade result to %s", host); } pcmk__xml_free(up); } } pcmk__xml_free(scratch); } return rc; } int cib_process_sync_one(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { return sync_our_cib(req, FALSE); } int cib_server_process_diff(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int rc = pcmk_ok; if (sync_in_progress > MAX_DIFF_RETRY) { /* Don't ignore diffs forever; the last request may have been lost. * If the diff fails, we'll ask for another full resync. 
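 * (With MAX_DIFF_RETRY at 5, this means a node that requested a sync ignores
 * at most five consecutive incoming diffs before resuming normal diff
 * processing.)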
*/ sync_in_progress = 0; } // The primary instance should never ignore a diff if (sync_in_progress && !based_is_primary) { int diff_add_updates = 0; int diff_add_epoch = 0; int diff_add_admin_epoch = 0; int diff_del_updates = 0; int diff_del_epoch = 0; int diff_del_admin_epoch = 0; cib_diff_version_details(input, &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); sync_in_progress++; crm_notice("Not applying diff %d.%d.%d -> %d.%d.%d (sync in progress)", diff_del_admin_epoch, diff_del_epoch, diff_del_updates, diff_add_admin_epoch, diff_add_epoch, diff_add_updates); return -pcmk_err_diff_resync; } rc = cib_process_diff(op, options, section, req, input, existing_cib, result_cib, answer); crm_trace("result: %s (%d), %s", pcmk_strerror(rc), rc, (based_is_primary? "primary": "secondary")); if ((rc == -pcmk_err_diff_resync) && !based_is_primary) { pcmk__xml_free(*result_cib); *result_cib = NULL; send_sync_request(NULL); } else if (rc == -pcmk_err_diff_resync) { rc = -pcmk_err_diff_failed; if (options & cib_force_diff) { crm_warn("Not requesting full refresh in R/W mode"); } } return rc; } int cib_process_replace_svr(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { int rc = cib_process_replace(op, options, section, req, input, existing_cib, result_cib, answer); if ((rc == pcmk_ok) && pcmk__xe_is(input, PCMK_XE_CIB)) { sync_in_progress = 0; } return rc; } /* @COMPAT: Remove when PCMK__CIB_REQUEST_ABS_DELETE is removed * (At least external client code <3.0.0 can send it) */ int cib_process_delete_absolute(const char *op, int options, const char *section, xmlNode * req, xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer) { return -EINVAL; } static xmlNode * cib_msg_copy(xmlNode *msg) { static const char *field_list[] = { PCMK__XA_T, PCMK__XA_CIB_CLIENTID, PCMK__XA_CIB_CALLOPT, PCMK__XA_CIB_CALLID, PCMK__XA_CIB_OP, PCMK__XA_CIB_ISREPLYTO, PCMK__XA_CIB_SECTION, PCMK__XA_CIB_HOST, PCMK__XA_CIB_RC, PCMK__XA_CIB_DELEGATED_FROM, PCMK__XA_CIB_UPDATE, PCMK__XA_CIB_CLIENTNAME, PCMK__XA_CIB_USER, PCMK__XA_CIB_NOTIFY_TYPE, PCMK__XA_CIB_NOTIFY_ACTIVATE, }; xmlNode *copy = pcmk__xe_create(NULL, PCMK__XE_COPY); for (int lpc = 0; lpc < PCMK__NELEM(field_list); lpc++) { const char *field = field_list[lpc]; const char *value = pcmk__xe_get(msg, field); pcmk__xe_set(copy, field, value); } return copy; } int sync_our_cib(xmlNode * request, gboolean all) { int result = pcmk_ok; char *digest = NULL; const char *host = pcmk__xe_get(request, PCMK__XA_SRC); const char *op = pcmk__xe_get(request, PCMK__XA_CIB_OP); pcmk__node_status_t *peer = NULL; xmlNode *replace_request = NULL; xmlNode *wrapper = NULL; CRM_CHECK(the_cib != NULL, return -EINVAL); CRM_CHECK(all || (host != NULL), return -EINVAL); crm_debug("Syncing CIB to %s", all ? "all peers" : host); replace_request = cib_msg_copy(request); if (host != NULL) { pcmk__xe_set(replace_request, PCMK__XA_CIB_ISREPLYTO, host); } if (all) { pcmk__xe_remove_attr(replace_request, PCMK__XA_CIB_HOST); } pcmk__xe_set(replace_request, PCMK__XA_CIB_OP, PCMK__CIB_REQUEST_REPLACE); // @TODO Keep for tracing, or drop? 
pcmk__xe_set(replace_request, PCMK__XA_ORIGINAL_CIB_OP, op); - pcmk__xe_set_bool_attr(replace_request, PCMK__XA_CIB_UPDATE, true); + pcmk__xe_set_bool(replace_request, PCMK__XA_CIB_UPDATE, true); pcmk__xe_set(replace_request, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); digest = pcmk__digest_xml(the_cib, true); pcmk__xe_set(replace_request, PCMK_XA_DIGEST, digest); wrapper = pcmk__xe_create(replace_request, PCMK__XE_CIB_CALLDATA); pcmk__xml_copy(wrapper, the_cib); if (!all) { peer = pcmk__get_node(0, host, NULL, pcmk__node_search_cluster_member); } if (!pcmk__cluster_send_message(peer, pcmk_ipc_based, replace_request)) { result = -ENOTCONN; } pcmk__xml_free(replace_request); free(digest); return result; } int cib_process_commit_transaction(const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { /* On success, our caller will activate *result_cib locally, trigger a * replace notification if appropriate, and sync *result_cib to all nodes. * On failure, our caller will free *result_cib. */ int rc = pcmk_rc_ok; const char *client_id = pcmk__xe_get(req, PCMK__XA_CIB_CLIENTID); const char *origin = pcmk__xe_get(req, PCMK__XA_SRC); pcmk__client_t *client = pcmk__find_client_by_id(client_id); rc = based_commit_transaction(input, client, origin, result_cib); if (rc != pcmk_rc_ok) { char *source = based_transaction_source_str(client, origin); crm_err("Could not commit transaction for %s: %s", source, pcmk_rc_str(rc)); free(source); } return pcmk_rc2legacy(rc); } int cib_process_schemas(const char *op, int options, const char *section, xmlNode *req, xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib, xmlNode **answer) { xmlNode *wrapper = NULL; xmlNode *data = NULL; const char *after_ver = NULL; GList *schemas = NULL; GList *already_included = NULL; *answer = pcmk__xe_create(NULL, PCMK__XA_SCHEMAS); wrapper = pcmk__xe_first_child(req, PCMK__XE_CIB_CALLDATA, NULL, NULL); data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); if (data == NULL) { crm_warn("No data specified in request"); return -EPROTO; } after_ver = pcmk__xe_get(data, PCMK_XA_VERSION); if (after_ver == NULL) { crm_warn("No version specified in request"); return -EPROTO; } /* The client requested all schemas after the latest one we know about, which * means the client is fully up-to-date. Return a properly formatted reply * with no schemas. */ if (pcmk__str_eq(after_ver, pcmk__highest_schema_name(), pcmk__str_none)) { return pcmk_ok; } schemas = pcmk__schema_files_later_than(after_ver); for (GList *iter = schemas; iter != NULL; iter = iter->next) { pcmk__build_schema_xml_node(*answer, iter->data, &already_included); } g_list_free_full(schemas, free); g_list_free_full(already_included, free); return pcmk_ok; } diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c index 6146b662a2..b2e3ed28c7 100644 --- a/daemons/controld/controld_cib.c +++ b/daemons/controld/controld_cib.c @@ -1,1060 +1,1060 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include /* sleep */ #include #include #include #include #include // Call ID of the most recent in-progress CIB resource update (or 0 if none) static int pending_rsc_update = 0; /*! 
* \internal * \brief Respond to a dropped CIB connection * * \param[in] user_data CIB connection that dropped */ static void handle_cib_disconnect(gpointer user_data) { CRM_LOG_ASSERT(user_data == controld_globals.cib_conn); controld_trigger_fsa(); controld_globals.cib_conn->state = cib_disconnected; if (pcmk__is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) { // @TODO This should trigger a reconnect, not a shutdown crm_crit("Lost connection to the CIB manager, shutting down"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); controld_clear_fsa_input_flags(R_CIB_CONNECTED); } else { // Expected crm_info("Disconnected from the CIB manager"); } } static void do_cib_updated(const char *event, xmlNode * msg) { const xmlNode *patchset = NULL; const char *client_name = NULL; crm_debug("Received CIB diff notification: DC=%s", pcmk__btoa(AM_I_DC)); if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) { return; } if (pcmk__cib_element_in_patchset(patchset, PCMK_XE_ALERTS) || pcmk__cib_element_in_patchset(patchset, PCMK_XE_CRM_CONFIG)) { controld_trigger_config(); } if (!AM_I_DC) { // We're not in control of the join sequence return; } client_name = pcmk__xe_get(msg, PCMK__XA_CIB_CLIENTNAME); if (!cib__client_triggers_refresh(client_name)) { // The CIB is still accurate return; } if (pcmk__cib_element_in_patchset(patchset, PCMK_XE_NODES) || pcmk__cib_element_in_patchset(patchset, PCMK_XE_STATUS)) { /* An unsafe client modified the PCMK_XE_NODES or PCMK_XE_STATUS * section. Ensure the node list is up-to-date, and start the join * process again so we get everyone's current resource history. */ if (client_name == NULL) { client_name = pcmk__xe_get(msg, PCMK__XA_CIB_CLIENTID); } crm_notice("Populating nodes and starting an election after %s event " "triggered by %s", event, pcmk__s(client_name, "(unidentified client)")); populate_cib_nodes(controld_node_update_quick|controld_node_update_all, __func__); register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); } } void controld_disconnect_cib_manager(void) { cib_t *cib_conn = controld_globals.cib_conn; pcmk__assert(cib_conn != NULL); crm_debug("Disconnecting from the CIB manager"); controld_clear_fsa_input_flags(R_CIB_CONNECTED); cib_conn->cmds->del_notify_callback(cib_conn, PCMK__VALUE_CIB_DIFF_NOTIFY, do_cib_updated); cib_free_callbacks(cib_conn); if (cib_conn->state != cib_disconnected) { cib_conn->cmds->set_secondary(cib_conn, cib_discard_reply); cib_conn->cmds->signoff(cib_conn); } } /* A_CIB_STOP, A_CIB_START, O_CIB_RESTART */ void do_cib_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { static int cib_retries = 0; cib_t *cib_conn = controld_globals.cib_conn; void (*dnotify_fn) (gpointer user_data) = handle_cib_disconnect; void (*update_cb) (const char *event, xmlNodePtr msg) = do_cib_updated; int rc = pcmk_ok; pcmk__assert(cib_conn != NULL); if (pcmk__is_set(action, A_CIB_STOP)) { if ((cib_conn->state != cib_disconnected) && (pending_rsc_update != 0)) { crm_info("Waiting for resource update %d to complete", pending_rsc_update); crmd_fsa_stall(FALSE); return; } controld_disconnect_cib_manager(); } if (!pcmk__is_set(action, A_CIB_START)) { return; } if (cur_state == S_STOPPING) { crm_err("Ignoring request to connect to the CIB manager after " "shutdown"); return; } rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command_nonblocking); if (rc != pcmk_ok) { // A short wait that usually avoids stalling the FSA sleep(1); rc = 
cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command_nonblocking); } if (rc != pcmk_ok) { crm_info("Could not connect to the CIB manager: %s", pcmk_strerror(rc)); } else if (cib_conn->cmds->set_connection_dnotify(cib_conn, dnotify_fn) != pcmk_ok) { crm_err("Could not set dnotify callback"); } else if (cib_conn->cmds->add_notify_callback(cib_conn, PCMK__VALUE_CIB_DIFF_NOTIFY, update_cb) != pcmk_ok) { crm_err("Could not set CIB notification callback (update)"); } else { controld_set_fsa_input_flags(R_CIB_CONNECTED); cib_retries = 0; } if (!pcmk__is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) { cib_retries++; if (cib_retries < 30) { crm_warn("Couldn't complete CIB registration %d times... " "pause and retry", cib_retries); controld_start_wait_timer(); crmd_fsa_stall(FALSE); } else { crm_err("Could not complete CIB registration %d times... " "hard error", cib_retries); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } } #define MIN_CIB_OP_TIMEOUT (30) /*! * \internal * \brief Get the timeout (in seconds) that should be used with CIB operations * * \return The maximum of 30 seconds, the value of the PCMK_cib_timeout * environment variable, or 10 seconds times one more than the number of * nodes in the cluster. */ unsigned int cib_op_timeout(void) { unsigned int calculated_timeout = 10U * (pcmk__cluster_num_active_nodes() + pcmk__cluster_num_remote_nodes() + 1U); calculated_timeout = QB_MAX(calculated_timeout, MIN_CIB_OP_TIMEOUT); crm_trace("Calculated timeout: %s", pcmk__readable_interval(calculated_timeout * 1000)); if (controld_globals.cib_conn) { controld_globals.cib_conn->call_timeout = calculated_timeout; } return calculated_timeout; } /*! * \internal * \brief Get CIB call options to use local scope if primary is unavailable * * \return CIB call options */ int crmd_cib_smart_opt(void) { int call_opt = cib_none; if ((controld_globals.fsa_state == S_ELECTION) || (controld_globals.fsa_state == S_PENDING)) { crm_info("Sending update to local CIB in state: %s", fsa_state2string(controld_globals.fsa_state)); cib__set_call_options(call_opt, "update", cib_none); } return call_opt; } static void cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { char *desc = user_data; if (rc == 0) { crm_debug("Deletion of %s (via CIB call %d) succeeded", desc, call_id); } else { crm_warn("Deletion of %s (via CIB call %d) failed: %s " QB_XS " rc=%d", desc, call_id, pcmk_strerror(rc), rc); } } // Searches for various portions of PCMK__XE_NODE_STATE to delete // Match a particular node's PCMK__XE_NODE_STATE (takes node name 1x) #define XPATH_NODE_STATE "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" // Node's lrm section (name 1x) #define XPATH_NODE_LRM XPATH_NODE_STATE "/" PCMK__XE_LRM /* Node's PCMK__XE_LRM_RSC_OP entries and PCMK__XE_LRM_RESOURCE entries without * unexpired lock * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x) */ #define XPATH_NODE_LRM_UNLOCKED XPATH_NODE_STATE "//" PCMK__XE_LRM_RSC_OP \ "|" XPATH_NODE_STATE \ "//" PCMK__XE_LRM_RESOURCE \ "[not(@" PCMK_OPT_SHUTDOWN_LOCK ") " \ "or " PCMK_OPT_SHUTDOWN_LOCK "<%lld]" // Node's PCMK__XE_TRANSIENT_ATTRIBUTES section (name 1x) #define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" PCMK__XE_TRANSIENT_ATTRIBUTES // Everything under PCMK__XE_NODE_STATE (name 1x) #define XPATH_NODE_ALL XPATH_NODE_STATE "/*" /* Unlocked history + transient attributes * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x, name 1x) */ #define XPATH_NODE_ALL_UNLOCKED 
XPATH_NODE_LRM_UNLOCKED "|" XPATH_NODE_ATTRS /*! * \internal * \brief Get the XPath and description of a node state section to be deleted * * \param[in] uname Desired node * \param[in] section Subsection of \c PCMK__XE_NODE_STATE to be deleted * \param[out] xpath Where to store XPath of \p section * \param[out] desc If not \c NULL, where to store description of \p section */ void controld_node_state_deletion_strings(const char *uname, enum controld_section_e section, char **xpath, char **desc) { const char *desc_pre = NULL; // Shutdown locks that started before this time are expired long long expire = (long long) time(NULL) - controld_globals.shutdown_lock_limit; switch (section) { case controld_section_lrm: *xpath = pcmk__assert_asprintf(XPATH_NODE_LRM, uname); desc_pre = "resource history"; break; case controld_section_lrm_unlocked: *xpath = pcmk__assert_asprintf(XPATH_NODE_LRM_UNLOCKED, uname, uname, expire); desc_pre = "resource history (other than shutdown locks)"; break; case controld_section_attrs: *xpath = pcmk__assert_asprintf(XPATH_NODE_ATTRS, uname); desc_pre = "transient attributes"; break; case controld_section_all: *xpath = pcmk__assert_asprintf(XPATH_NODE_ALL, uname); desc_pre = "all state"; break; case controld_section_all_unlocked: *xpath = pcmk__assert_asprintf(XPATH_NODE_ALL_UNLOCKED, uname, uname, expire, uname); desc_pre = "all state (other than shutdown locks)"; break; default: // We called this function incorrectly pcmk__assert(false); break; } if (desc != NULL) { *desc = pcmk__assert_asprintf("%s for node %s", desc_pre, uname); } } /*! * \internal * \brief Delete subsection of a node's CIB \c PCMK__XE_NODE_STATE * * \param[in] uname Desired node * \param[in] section Subsection of \c PCMK__XE_NODE_STATE to delete * \param[in] options CIB call options to use */ void controld_delete_node_state(const char *uname, enum controld_section_e section, int options) { cib_t *cib = controld_globals.cib_conn; char *xpath = NULL; char *desc = NULL; int cib_rc = pcmk_ok; pcmk__assert((uname != NULL) && (cib != NULL)); controld_node_state_deletion_strings(uname, section, &xpath, &desc); cib__set_call_options(options, "node state deletion", cib_xpath|cib_multiple); cib_rc = cib->cmds->remove(cib, xpath, NULL, options); fsa_register_cib_callback(cib_rc, desc, cib_delete_callback); crm_info("Deleting %s (via CIB call %d) " QB_XS " xpath=%s", desc, cib_rc, xpath); // CIB library handles freeing desc free(xpath); } // Takes node name and resource ID #define XPATH_RESOURCE_HISTORY "//" PCMK__XE_NODE_STATE \ "[@" PCMK_XA_UNAME "='%s']/" \ PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES \ "/" PCMK__XE_LRM_RESOURCE \ "[@" PCMK_XA_ID "='%s']" // @TODO could add "and @PCMK_OPT_SHUTDOWN_LOCK" to limit to locks /*! 
 * \internal
 * \brief Clear resource history from CIB for a given resource and node
 *
 * \param[in] rsc_id        ID of resource to be cleared
 * \param[in] node          Node whose resource history should be cleared
 * \param[in] user_name     ACL user name to use
 * \param[in] call_options  CIB call options
 *
 * \return Standard Pacemaker return code
 */
int
controld_delete_resource_history(const char *rsc_id, const char *node,
                                 const char *user_name, int call_options)
{
    char *desc = NULL;
    char *xpath = NULL;
    int rc = pcmk_rc_ok;
    cib_t *cib = controld_globals.cib_conn;

    CRM_CHECK((rsc_id != NULL) && (node != NULL), return EINVAL);

    desc = pcmk__assert_asprintf("resource history for %s on %s", rsc_id, node);
    if (cib == NULL) {
        crm_err("Unable to clear %s: no CIB connection", desc);
        free(desc);
        return ENOTCONN;
    }

    // Ask CIB to delete the entry
    xpath = pcmk__assert_asprintf(XPATH_RESOURCE_HISTORY, node, rsc_id);

    cib->cmds->set_user(cib, user_name);
    rc = cib->cmds->remove(cib, xpath, NULL, call_options|cib_xpath);
    cib->cmds->set_user(cib, NULL);

    if (rc < 0) {
        rc = pcmk_legacy2rc(rc);
        crm_err("Could not delete resource status of %s on %s%s%s: %s "
                QB_XS " rc=%d",
                rsc_id, node, (user_name? " for user " : ""),
                (user_name? user_name : ""), pcmk_rc_str(rc), rc);
        free(desc);
        free(xpath);
        return rc;
    }

    if (pcmk__is_set(call_options, cib_sync_call)) {
        if (pcmk__is_set(call_options, cib_dryrun)) {
            crm_debug("Deletion of %s would succeed", desc);
        } else {
            crm_debug("Deletion of %s succeeded", desc);
        }
        free(desc);

    } else {
        crm_info("Clearing %s (via CIB call %d) " QB_XS " xpath=%s",
                 desc, rc, xpath);
        fsa_register_cib_callback(rc, desc, cib_delete_callback);
        // CIB library handles freeing desc
    }

    free(xpath);
    return pcmk_rc_ok;
}

/*!
 * \internal
 * \brief Build XML and string of parameters meeting some criteria, for digest
 *
 * \param[in]  op          Executor event with parameter table to use
 * \param[in]  metadata    Parsed meta-data for executed resource agent
 * \param[in]  param_type  Flag used for selection criteria
 * \param[out] result      Will be set to newly created XML with selected
 *                         parameters as attributes
 *
 * \return Newly allocated space-separated string of parameter names
 * \note Selection criteria vary by param_type: for the restart digest, we
 *       want parameters that are *not* marked reloadable (OCF 1.1) or that
 *       *are* marked unique (pre-1.1), for both string and XML results; for
 *       the secure digest, we want parameters that *are* marked private for
 *       the string, but parameters that are *not* marked private for the XML.
 * \note It is the caller's responsibility to free the string return value with
 *       \p g_string_free() and the XML result with \p pcmk__xml_free().
 */
static GString *
build_parameter_list(const lrmd_event_data_t *op,
                     const struct ra_metadata_s *metadata,
                     enum ra_param_flags_e param_type, xmlNode **result)
{
    GString *list = NULL;

    *result = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);

    /* Consider all parameters except private ones, to be consistent with
     * what the scheduler does in calculate_secure_digest().
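     *
     * For example, with a hypothetical agent parameter "password" marked
     * private and "port" not marked private, the ra_param_private case would
     * return a space-padded string containing " password " (used for
     * PCMK__XA_OP_SECURE_PARAMS below), while the XML result would keep only
     * port, which is what PCMK__XA_OP_SECURE_DIGEST is computed from.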
*/ if (param_type == ra_param_private && compare_version(controld_globals.dc_version, "3.16.0") >= 0) { g_hash_table_foreach(op->params, hash2field, *result); pcmk__filter_op_for_digest(*result); } for (GList *iter = metadata->ra_params; iter != NULL; iter = iter->next) { struct ra_param_s *param = (struct ra_param_s *) iter->data; bool accept_for_list = false; bool accept_for_xml = false; switch (param_type) { case ra_param_reloadable: accept_for_list = !pcmk__is_set(param->rap_flags, param_type); accept_for_xml = accept_for_list; break; case ra_param_unique: accept_for_list = pcmk__is_set(param->rap_flags, param_type); accept_for_xml = accept_for_list; break; case ra_param_private: accept_for_list = pcmk__is_set(param->rap_flags, param_type); accept_for_xml = !accept_for_list; break; } if (accept_for_list) { crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type)); if (list == NULL) { // We will later search for " WORD ", so start list with a space pcmk__add_word(&list, 256, " "); } pcmk__add_word(&list, 0, param->rap_name); } else { crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type)); } if (accept_for_xml) { const char *v = g_hash_table_lookup(op->params, param->rap_name); if (v != NULL) { crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v); pcmk__xe_set(*result, param->rap_name, v); } } else { crm_trace("Removing attr %s from the xml result", param->rap_name); pcmk__xe_remove_attr(*result, param->rap_name); } } if (list != NULL) { // We will later search for " WORD ", so end list with a space pcmk__add_word(&list, 0, " "); } return list; } static void append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { GString *list = NULL; char *digest = NULL; xmlNode *restart = NULL; CRM_LOG_ASSERT(op->params != NULL); if (op->interval_ms > 0) { /* monitors are not reloadable */ return; } if (pcmk__is_set(metadata->ra_flags, ra_supports_reload_agent)) { /* Add parameters not marked reloadable to the PCMK__XA_OP_FORCE_RESTART * list */ list = build_parameter_list(op, metadata, ra_param_reloadable, &restart); } else if (pcmk__is_set(metadata->ra_flags, ra_supports_legacy_reload)) { /* @COMPAT pre-OCF-1.1 resource agents * * Before OCF 1.1, Pacemaker abused "unique=0" to indicate * reloadability. Add any parameters with unique="1" to the * PCMK__XA_OP_FORCE_RESTART list. */ list = build_parameter_list(op, metadata, ra_param_unique, &restart); } else { // Resource does not support agent reloads return; } digest = pcmk__digest_op_params(restart); /* Add PCMK__XA_OP_FORCE_RESTART and PCMK__XA_OP_RESTART_DIGEST to indicate * the resource supports reload, no matter if it actually supports any * reloadable parameters */ pcmk__xe_set(update, PCMK__XA_OP_FORCE_RESTART, (list == NULL)? 
"" : (const char *) list->str); pcmk__xe_set(update, PCMK__XA_OP_RESTART_DIGEST, digest); if ((list != NULL) && (list->len > 0)) { crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str); } else { crm_trace("%s: %s", op->rsc_id, digest); } if (list != NULL) { g_string_free(list, TRUE); } pcmk__xml_free(restart); free(digest); } static void append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata, xmlNode *update, const char *version) { GString *list = NULL; char *digest = NULL; xmlNode *secure = NULL; CRM_LOG_ASSERT(op->params != NULL); /* To keep PCMK__XA_OP_SECURE_PARAMS short, we want it to contain the secure * parameters but PCMK__XA_OP_SECURE_DIGEST to be based on the insecure ones */ list = build_parameter_list(op, metadata, ra_param_private, &secure); if (list != NULL) { digest = pcmk__digest_op_params(secure); pcmk__xe_set(update, PCMK__XA_OP_SECURE_PARAMS, list->str); pcmk__xe_set(update, PCMK__XA_OP_SECURE_DIGEST, digest); crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str); g_string_free(list, TRUE); } else { crm_trace("%s: no secure parameters", op->rsc_id); } pcmk__xml_free(secure); free(digest); } /*! * \internal * \brief Create XML for a resource history entry * * \param[in] func Function name of caller * \param[in,out] parent XML to add entry to * \param[in] rsc Affected resource * \param[in,out] op Action to add an entry for (or NULL to do nothing) * \param[in] node_name Node where action occurred */ void controld_add_resource_history_xml_as(const char *func, xmlNode *parent, const lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, const char *node_name) { int target_rc = 0; xmlNode *xml_op = NULL; struct ra_metadata_s *metadata = NULL; const char *caller_version = NULL; lrm_state_t *lrm_state = NULL; if (op == NULL) { return; } target_rc = rsc_op_expected_rc(op); caller_version = g_hash_table_lookup(op->params, PCMK_XA_CRM_FEATURE_SET); CRM_CHECK(caller_version != NULL, caller_version = CRM_FEATURE_SET); xml_op = pcmk__create_history_xml(parent, op, caller_version, target_rc, controld_globals.cluster->priv->node_name, func); if (xml_op == NULL) { return; } if ((rsc == NULL) || (op->params == NULL) || !crm_op_needs_metadata(rsc->standard, op->op_type)) { crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)", op->op_type, op->rsc_id, op->params, rsc); return; } lrm_state = controld_get_executor_state(node_name, false); if (lrm_state == NULL) { crm_warn("Cannot calculate digests for operation " PCMK__OP_FMT " because we have no connection to executor for %s", op->rsc_id, op->op_type, op->interval_ms, node_name); return; } /* Ideally the metadata is cached, and the agent is just a fallback. * * @TODO Go through all callers and ensure they get metadata asynchronously * first. */ metadata = controld_get_rsc_metadata(lrm_state, rsc, controld_metadata_from_agent |controld_metadata_from_cache); if (metadata == NULL) { return; } crm_trace("Including additional digests for %s:%s:%s", rsc->standard, rsc->provider, rsc->type); append_restart_list(op, metadata, xml_op, caller_version); append_secure_list(op, metadata, xml_op, caller_version); return; } /*! 
* \internal * \brief Record an action as pending in the CIB, if appropriate * * \param[in] node_name Node where the action is pending * \param[in] rsc Resource that action is for * \param[in,out] op Pending action * * \return true if action was recorded in CIB, otherwise false */ bool controld_record_pending_op(const char *node_name, const lrmd_rsc_info_t *rsc, lrmd_event_data_t *op) { const char *record_pending = NULL; CRM_CHECK((node_name != NULL) && (rsc != NULL) && (op != NULL), return false); // Never record certain operation types as pending if ((op->op_type == NULL) || (op->params == NULL) || !controld_action_is_recordable(op->op_type)) { return false; } // Check action's PCMK_META_RECORD_PENDING meta-attribute (defaults to true) record_pending = crm_meta_value(op->params, PCMK_META_RECORD_PENDING); if ((record_pending != NULL) && !pcmk__is_true(record_pending)) { pcmk__warn_once(pcmk__wo_record_pending, "The " PCMK_META_RECORD_PENDING " option (for example, " "for the %s resource's %s operation) is deprecated and " "will be removed in a future release", rsc->id, op->op_type); return false; } op->call_id = -1; op->t_run = time(NULL); op->t_rcchange = op->t_run; lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL); crm_debug("Recording pending %s-interval %s for %s on %s in the CIB", pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id, node_name); controld_update_resource_history(node_name, rsc, op, 0); return true; } static void cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { switch (rc) { case pcmk_ok: case -pcmk_err_diff_failed: case -pcmk_err_diff_resync: crm_trace("Resource history update completed (call=%d rc=%d)", call_id, rc); break; default: if (call_id > 0) { crm_warn("Resource history update %d failed: %s " QB_XS " rc=%d", call_id, pcmk_strerror(rc), rc); } else { crm_warn("Resource history update failed: %s " QB_XS " rc=%d", pcmk_strerror(rc), rc); } } if (call_id == pending_rsc_update) { pending_rsc_update = 0; controld_trigger_fsa(); } } /* Only successful stops, and probes that found the resource inactive, get locks * recorded in the history. This ensures the resource stays locked to the node * until it is active there again after the node comes back up. */ static bool should_preserve_lock(lrmd_event_data_t *op) { if (!pcmk__is_set(controld_globals.flags, controld_shutdown_lock_enabled)) { return false; } if (!strcmp(op->op_type, PCMK_ACTION_STOP) && (op->rc == PCMK_OCF_OK)) { return true; } if (!strcmp(op->op_type, PCMK_ACTION_MONITOR) && (op->rc == PCMK_OCF_NOT_RUNNING)) { return true; } return false; } /*! * \internal * \brief Request a CIB update * * \param[in] section Section of CIB to update * \param[in] data New XML of CIB section to update * \param[in] options CIB call options * \param[in] callback If not \c NULL, set this as the operation callback * * \return Standard Pacemaker return code * * \note If \p callback is \p cib_rsc_callback(), the CIB update's call ID is * stored in \p pending_rsc_update on success. 
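 *
 * For example, controld_update_resource_history() below submits a
 * PCMK_XE_STATUS fragment asynchronously via
 *     controld_update_cib(PCMK_XE_STATUS, update, call_opt, cib_rsc_callback);
 * and relies on that to stash the CIB call ID in pending_rsc_update.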
*/ int controld_update_cib(const char *section, xmlNode *data, int options, void (*callback)(xmlNode *, int, int, xmlNode *, void *)) { cib_t *cib = controld_globals.cib_conn; int cib_rc = -ENOTCONN; pcmk__assert(data != NULL); if (cib != NULL) { cib_rc = cib->cmds->modify(cib, section, data, options); if (cib_rc >= 0) { crm_debug("Submitted CIB update %d for %s section", cib_rc, section); } } if (callback == NULL) { if (cib_rc < 0) { crm_err("Failed to update CIB %s section: %s", section, pcmk_rc_str(pcmk_legacy2rc(cib_rc))); } } else { if ((cib_rc >= 0) && (callback == cib_rsc_callback)) { /* Checking for a particular callback is a little hacky, but it * didn't seem worth adding an output argument for cib_rc for just * one use case. */ pending_rsc_update = cib_rc; } fsa_register_cib_callback(cib_rc, NULL, callback); } return (cib_rc >= 0)? pcmk_rc_ok : pcmk_legacy2rc(cib_rc); } /*! * \internal * \brief Update resource history entry in CIB * * \param[in] node_name Node where action occurred * \param[in] rsc Resource that action is for * \param[in,out] op Action to record * \param[in] lock_time If nonzero, when resource was locked to node * * \note On success, the CIB update's call ID will be stored in * pending_rsc_update. */ void controld_update_resource_history(const char *node_name, const lrmd_rsc_info_t *rsc, lrmd_event_data_t *op, time_t lock_time) { xmlNode *update = NULL; xmlNode *xml = NULL; int call_opt = crmd_cib_smart_opt(); const char *node_id = NULL; const char *container = NULL; CRM_CHECK((node_name != NULL) && (op != NULL), return); if (rsc == NULL) { crm_warn("Resource %s no longer exists in the executor", op->rsc_id); controld_ack_event_directly(NULL, NULL, rsc, op, op->rsc_id); return; } // update = pcmk__xe_create(NULL, PCMK_XE_STATUS); // xml = pcmk__xe_create(update, PCMK__XE_NODE_STATE); if (controld_is_local_node(node_name)) { node_id = controld_globals.our_uuid; } else { node_id = node_name; - pcmk__xe_set_bool_attr(xml, PCMK_XA_REMOTE_NODE, true); + pcmk__xe_set_bool(xml, PCMK_XA_REMOTE_NODE, true); } pcmk__xe_set(xml, PCMK_XA_ID, node_id); pcmk__xe_set(xml, PCMK_XA_UNAME, node_name); pcmk__xe_set(xml, PCMK_XA_CRM_DEBUG_ORIGIN, __func__); // xml = pcmk__xe_create(xml, PCMK__XE_LRM); pcmk__xe_set(xml, PCMK_XA_ID, node_id); // xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCES); // xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCE); pcmk__xe_set(xml, PCMK_XA_ID, op->rsc_id); pcmk__xe_set(xml, PCMK_XA_CLASS, rsc->standard); pcmk__xe_set(xml, PCMK_XA_PROVIDER, rsc->provider); pcmk__xe_set(xml, PCMK_XA_TYPE, rsc->type); if (lock_time != 0) { /* Actions on a locked resource should either preserve the lock by * recording it with the action result, or clear it. */ if (!should_preserve_lock(op)) { lock_time = 0; } pcmk__xe_set_time(xml, PCMK_OPT_SHUTDOWN_LOCK, lock_time); } if (op->params != NULL) { container = g_hash_table_lookup(op->params, CRM_META "_" PCMK__META_CONTAINER); if (container != NULL) { crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container); pcmk__xe_set(xml, PCMK__META_CONTAINER, container); } } // (possibly more than one) controld_add_resource_history_xml(xml, rsc, op, node_name); /* Update CIB asynchronously. Even if it fails, the resource state should be * discovered during the next election. Worst case, the node is wrongly * fenced for running a resource it isn't. */ crm_log_xml_trace(update, __func__); controld_update_cib(PCMK_XE_STATUS, update, call_opt, cib_rsc_callback); pcmk__xml_free(update); } /*! 
* \internal * \brief Erase an LRM history entry from the CIB, given the operation data * * \param[in] op Operation whose history should be deleted */ void controld_delete_action_history(const lrmd_event_data_t *op) { xmlNode *xml_top = NULL; CRM_CHECK(op != NULL, return); xml_top = pcmk__xe_create(NULL, PCMK__XE_LRM_RSC_OP); pcmk__xe_set_int(xml_top, PCMK__XA_CALL_ID, op->call_id); pcmk__xe_set(xml_top, PCMK__XA_TRANSITION_KEY, op->user_data); if (op->interval_ms > 0) { char *op_id = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms); /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */ pcmk__xe_set(xml_top, PCMK_XA_ID, op_id); free(op_id); } crm_debug("Erasing resource operation history for " PCMK__OP_FMT " (call=%d)", op->rsc_id, op->op_type, op->interval_ms, op->call_id); controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, PCMK_XE_STATUS, xml_top, cib_none); crm_log_xml_trace(xml_top, "op:cancel"); pcmk__xml_free(xml_top); } /* Define xpath to find LRM resource history entry by node and resource */ #define XPATH_HISTORY \ "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \ "/" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" \ "/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES \ "/" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" \ "/" PCMK__XE_LRM_RSC_OP /* ... and also by operation key */ #define XPATH_HISTORY_ID XPATH_HISTORY "[@" PCMK_XA_ID "='%s']" /* ... and also by operation key and operation call ID */ #define XPATH_HISTORY_CALL XPATH_HISTORY \ "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_CALL_ID "='%d']" /* ... and also by operation key and original operation key */ #define XPATH_HISTORY_ORIG XPATH_HISTORY \ "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_OPERATION_KEY "='%s']" /*! * \internal * \brief Delete a last_failure resource history entry from the CIB * * \param[in] rsc_id Name of resource to clear history for * \param[in] node Name of node to clear history for * \param[in] action If specified, delete only if this was failed action * \param[in] interval_ms If \p action is specified, it has this interval */ void controld_cib_delete_last_failure(const char *rsc_id, const char *node, const char *action, guint interval_ms) { char *xpath = NULL; char *last_failure_key = NULL; CRM_CHECK((rsc_id != NULL) && (node != NULL), return); // Generate XPath to match desired entry last_failure_key = pcmk__op_key(rsc_id, "last_failure", 0); if (action == NULL) { xpath = pcmk__assert_asprintf(XPATH_HISTORY_ID, node, rsc_id, last_failure_key); } else { char *action_key = pcmk__op_key(rsc_id, action, interval_ms); xpath = pcmk__assert_asprintf(XPATH_HISTORY_ORIG, node, rsc_id, last_failure_key, action_key); free(action_key); } free(last_failure_key); controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath, NULL, cib_xpath); free(xpath); } /*! 
* \internal * \brief Delete resource history entry from the CIB, given operation key * * \param[in] rsc_id Name of resource to clear history for * \param[in] node Name of node to clear history for * \param[in] key Operation key of operation to clear history for * \param[in] call_id If specified, delete entry only if it has this call ID */ void controld_delete_action_history_by_key(const char *rsc_id, const char *node, const char *key, int call_id) { char *xpath = NULL; CRM_CHECK((rsc_id != NULL) && (node != NULL) && (key != NULL), return); if (call_id > 0) { xpath = pcmk__assert_asprintf(XPATH_HISTORY_CALL, node, rsc_id, key, call_id); } else { xpath = pcmk__assert_asprintf(XPATH_HISTORY_ID, node, rsc_id, key); } controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath, NULL, cib_xpath); free(xpath); } diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c index ba2b751be8..cb1a3d7270 100644 --- a/daemons/controld/controld_join_dc.c +++ b/daemons/controld/controld_join_dc.c @@ -1,1097 +1,1097 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include // PRIu32 #include // bool, true, false #include // NULL #include // free(), etc. #include // gboolean, etc. #include // xmlNode #include #include #include #include static char *max_generation_from = NULL; static xmlNodePtr max_generation_xml = NULL; /*! * \internal * \brief Nodes from which a CIB sync has failed since the peer joined * * This table is of the form (node_name -> join_id). \p node_name is * the name of a client node from which a CIB \p sync_from() call has failed in * \p do_dc_join_finalize() since the client joined the cluster as a peer. * \p join_id is the ID of the join round in which the \p sync_from() failed, * and is intended for use in nack log messages. */ static GHashTable *failed_sync_nodes = NULL; void finalize_join_for(gpointer key, gpointer value, gpointer user_data); void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data); gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source); /* Numeric counter used to identify join rounds (an unsigned int would be * appropriate, except we get and set it in XML as int) */ static int current_join_id = 0; /*! * \internal * \brief Get log-friendly string equivalent of a controller group join phase * * \param[in] phase Join phase * * \return Log-friendly string equivalent of \p phase */ static const char * join_phase_text(enum controld_join_phase phase) { switch (phase) { case controld_join_nack: return "nack"; case controld_join_none: return "none"; case controld_join_welcomed: return "welcomed"; case controld_join_integrated: return "integrated"; case controld_join_finalized: return "finalized"; case controld_join_confirmed: return "confirmed"; default: return "invalid"; } } /*! * \internal * \brief Destroy the hash table containing failed sync nodes */ void controld_destroy_failed_sync_table(void) { if (failed_sync_nodes != NULL) { g_hash_table_destroy(failed_sync_nodes); failed_sync_nodes = NULL; } } /*! 
* \internal * \brief Remove a node from the failed sync nodes table if present * * \param[in] node_name Node name to remove */ void controld_remove_failed_sync_node(const char *node_name) { if (failed_sync_nodes != NULL) { g_hash_table_remove(failed_sync_nodes, (gchar *) node_name); } } /*! * \internal * \brief Add to a hash table a node whose CIB failed to sync * * \param[in] node_name Name of node whose CIB failed to sync * \param[in] join_id Join round when the failure occurred */ static void record_failed_sync_node(const char *node_name, gint join_id) { if (failed_sync_nodes == NULL) { failed_sync_nodes = pcmk__strikey_table(g_free, NULL); } /* If the node is already in the table then we failed to nack it during the * filter offer step */ CRM_LOG_ASSERT(g_hash_table_insert(failed_sync_nodes, g_strdup(node_name), GINT_TO_POINTER(join_id))); } /*! * \internal * \brief Look up a node name in the failed sync table * * \param[in] node_name Name of node to look up * \param[out] join_id Where to store the join ID of when the sync failed * * \return Standard Pacemaker return code. Specifically, \p pcmk_rc_ok if the * node name was found, or \p pcmk_rc_node_unknown otherwise. * \note \p *join_id is set to -1 if the node is not found. */ static int lookup_failed_sync_node(const char *node_name, gint *join_id) { *join_id = -1; if (failed_sync_nodes != NULL) { gpointer result = g_hash_table_lookup(failed_sync_nodes, (gchar *) node_name); if (result != NULL) { *join_id = GPOINTER_TO_INT(result); return pcmk_rc_ok; } } return pcmk_rc_node_unknown; } void crm_update_peer_join(const char *source, pcmk__node_status_t *node, enum controld_join_phase phase) { enum controld_join_phase last = controld_get_join_phase(node); CRM_CHECK(node != NULL, return); /* Remote nodes do not participate in joins */ if (pcmk__is_set(node->flags, pcmk__node_status_remote)) { return; } if (phase == last) { crm_trace("Node %s join-%d phase is still %s " QB_XS " nodeid=%" PRIu32 " source=%s", node->name, current_join_id, join_phase_text(last), node->cluster_layer_id, source); return; } if ((phase <= controld_join_none) || (phase == (last + 1))) { struct controld_node_status_data *data = NULL; if (node->user_data == NULL) { node->user_data = pcmk__assert_alloc(1, sizeof(struct controld_node_status_data)); } data = node->user_data; data->join_phase = phase; crm_trace("Node %s join-%d phase is now %s (was %s) " QB_XS " nodeid=%" PRIu32 " source=%s", node->name, current_join_id, join_phase_text(phase), join_phase_text(last), node->cluster_layer_id, source); return; } crm_warn("Rejecting join-%d phase update for node %s because can't go from " "%s to %s " QB_XS " nodeid=%" PRIu32 " source=%s", current_join_id, node->name, join_phase_text(last), join_phase_text(phase), node->cluster_layer_id, source); } static void start_join_round(void) { GHashTableIter iter; pcmk__node_status_t *peer = NULL; crm_debug("Starting new join round join-%d", current_join_id); g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { crm_update_peer_join(__func__, peer, controld_join_none); } if (max_generation_from != NULL) { free(max_generation_from); max_generation_from = NULL; } if (max_generation_xml != NULL) { pcmk__xml_free(max_generation_xml); max_generation_xml = NULL; } controld_clear_fsa_input_flags(R_HAVE_CIB); } /*! 
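 */

/* For illustration: crm_update_peer_join() above only lets a peer's join phase
 * advance one step at a time, or fall back to a reset value. In a successful
 * round, the DC walks each peer through
 *
 *     controld_join_none -> controld_join_welcomed -> controld_join_integrated
 *         -> controld_join_finalized -> controld_join_confirmed
 *
 * while controld_join_nack marks a rejected peer and can be set at any point.
 * Any larger jump (for example, welcomed directly to finalized) is refused and
 * logged by the crm_warn() branch.
 */

/*!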
* \internal * \brief Create a join message from the DC * * \param[in] join_op Join operation name * \param[in] host_to Recipient of message */ static xmlNode * create_dc_message(const char *join_op, const char *host_to) { xmlNode *msg = pcmk__new_request(pcmk_ipc_controld, CRM_SYSTEM_DC, host_to, CRM_SYSTEM_CRMD, join_op, NULL); /* Identify which election this is a part of */ pcmk__xe_set_int(msg, PCMK__XA_JOIN_ID, current_join_id); /* Add a field specifying whether the DC is shutting down. This keeps the * joining node from fencing the old DC if it becomes the new DC. */ - pcmk__xe_set_bool_attr(msg, PCMK__XA_DC_LEAVING, - pcmk__is_set(controld_globals.fsa_input_register, - R_SHUTDOWN)); + pcmk__xe_set_bool(msg, PCMK__XA_DC_LEAVING, + pcmk__is_set(controld_globals.fsa_input_register, + R_SHUTDOWN)); return msg; } static void join_make_offer(gpointer key, gpointer value, gpointer user_data) { /* @TODO We don't use user_data except to distinguish one particular call * from others. Make this clearer. */ xmlNode *offer = NULL; pcmk__node_status_t *member = (pcmk__node_status_t *) value; pcmk__assert(member != NULL); if (!pcmk__cluster_is_node_active(member)) { crm_info("Not making join-%d offer to inactive node %s", current_join_id, pcmk__s(member->name, "with unknown name")); if ((member->expected == NULL) && pcmk__str_eq(member->state, PCMK__VALUE_LOST, pcmk__str_none)) { /* You would think this unsafe, but in fact this plus an * active resource is what causes it to be fenced. * * Yes, this does mean that any node that dies at the same * time as the old DC and is not running resource (still) * won't be fenced. * * I'm not happy about this either. */ pcmk__update_peer_expected(__func__, member, CRMD_JOINSTATE_DOWN); } return; } if (member->name == NULL) { crm_info("Not making join-%d offer to node uuid %s with unknown name", current_join_id, member->xml_id); return; } if (controld_globals.membership_id != controld_globals.peer_seq) { controld_globals.membership_id = controld_globals.peer_seq; crm_info("Making join-%d offers based on membership event %llu", current_join_id, controld_globals.peer_seq); } if (user_data != NULL) { enum controld_join_phase phase = controld_get_join_phase(member); if (phase > controld_join_none) { crm_info("Not making join-%d offer to already known node %s (%s)", current_join_id, member->name, join_phase_text(phase)); return; } } crm_update_peer_join(__func__, (pcmk__node_status_t*) member, controld_join_none); offer = create_dc_message(CRM_OP_JOIN_OFFER, member->name); // Advertise our feature set so the joining node can bail if not compatible pcmk__xe_set(offer, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); crm_info("Sending join-%d offer to %s", current_join_id, member->name); pcmk__cluster_send_message(member, pcmk_ipc_controld, offer); pcmk__xml_free(offer); crm_update_peer_join(__func__, member, controld_join_welcomed); } /* A_DC_JOIN_OFFER_ALL */ void do_dc_join_offer_all(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int count; /* Reset everyone's status back to down or in_ccm in the CIB. * Any nodes that are active in the CIB but not in the cluster membership * will be seen as offline by the scheduler anyway. 
*/ current_join_id++; start_join_round(); update_dc(NULL); if (cause == C_HA_MESSAGE && current_input == I_NODE_JOIN) { crm_info("A new node joined the cluster"); } g_hash_table_foreach(pcmk__peer_cache, join_make_offer, NULL); count = crmd_join_phase_count(controld_join_welcomed); crm_info("Waiting on join-%d requests from %d outstanding node%s", current_join_id, count, pcmk__plural_s(count)); // Don't waste time by invoking the scheduler yet } /* A_DC_JOIN_OFFER_ONE */ void do_dc_join_offer_one(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { pcmk__node_status_t *member = NULL; ha_msg_input_t *welcome = NULL; int count; const char *join_to = NULL; if (msg_data->data == NULL) { crm_info("Making join-%d offers to any unconfirmed nodes " "because an unknown node joined", current_join_id); g_hash_table_foreach(pcmk__peer_cache, join_make_offer, &member); check_join_state(cur_state, __func__); return; } welcome = fsa_typed_data(fsa_dt_ha_msg); if (welcome == NULL) { // fsa_typed_data() already logged an error return; } join_to = pcmk__xe_get(welcome->msg, PCMK__XA_SRC); if (join_to == NULL) { crm_err("Can't make join-%d offer to unknown node", current_join_id); return; } member = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster_member); /* It is possible that a node will have been sick or starting up when the * original offer was made. However, it will either re-announce itself in * due course, or we can re-store the original offer on the client. */ crm_update_peer_join(__func__, member, controld_join_none); join_make_offer(NULL, member, NULL); /* If the offer isn't to the local node, make an offer to the local node as * well, to ensure the correct value for max_generation_from. */ if (!controld_is_local_node(join_to)) { member = controld_get_local_node_status(); join_make_offer(NULL, member, NULL); } /* This was a genuine join request; cancel any existing transition and * invoke the scheduler. 
*/ abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Node join", NULL); count = crmd_join_phase_count(controld_join_welcomed); crm_info("Waiting on join-%d requests from %d outstanding node%s", current_join_id, count, pcmk__plural_s(count)); // Don't waste time by invoking the scheduler yet } static int compare_int_fields(xmlNode * left, xmlNode * right, const char *field) { const char *elem_l = pcmk__xe_get(left, field); const char *elem_r = pcmk__xe_get(right, field); long long int_elem_l; long long int_elem_r; int rc = pcmk_rc_ok; rc = pcmk__scan_ll(elem_l, &int_elem_l, -1LL); if (rc != pcmk_rc_ok) { // Shouldn't be possible crm_warn("Comparing current CIB %s as -1 " "because '%s' is not an integer", field, elem_l); } rc = pcmk__scan_ll(elem_r, &int_elem_r, -1LL); if (rc != pcmk_rc_ok) { // Shouldn't be possible crm_warn("Comparing joining node's CIB %s as -1 " "because '%s' is not an integer", field, elem_r); } if (int_elem_l < int_elem_r) { return -1; } else if (int_elem_l > int_elem_r) { return 1; } return 0; } /* A_DC_JOIN_PROCESS_REQ */ void do_dc_join_filter_offer(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { xmlNode *generation = NULL; int cmp = 0; int join_id = -1; int count = 0; gint value = 0; gboolean ack_nack_bool = TRUE; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); const char *join_from = pcmk__xe_get(join_ack->msg, PCMK__XA_SRC); const char *ref = pcmk__xe_get(join_ack->msg, PCMK_XA_REFERENCE); const char *join_version = pcmk__xe_get(join_ack->msg, PCMK_XA_CRM_FEATURE_SET); pcmk__node_status_t *join_node = NULL; if (join_from == NULL) { crm_err("Ignoring invalid join request without node name"); return; } join_node = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster_member); pcmk__xe_get_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id); if (join_id != current_join_id) { crm_debug("Ignoring join-%d request from %s because we are on join-%d", join_id, join_from, current_join_id); check_join_state(cur_state, __func__); return; } generation = join_ack->xml; if (max_generation_xml != NULL && generation != NULL) { int lpc = 0; const char *attributes[] = { PCMK_XA_ADMIN_EPOCH, PCMK_XA_EPOCH, PCMK_XA_NUM_UPDATES, }; /* It's not obvious that join_ack->xml is the PCMK__XE_GENERATION_TUPLE * element from the join client. The "if" guard is for clarity. */ if (pcmk__xe_is(generation, PCMK__XE_GENERATION_TUPLE)) { for (lpc = 0; cmp == 0 && lpc < PCMK__NELEM(attributes); lpc++) { cmp = compare_int_fields(max_generation_xml, generation, attributes[lpc]); } } else { // Should always be PCMK__XE_GENERATION_TUPLE CRM_LOG_ASSERT(false); } } if (ref == NULL) { ref = "none"; // for logging only } if (lookup_failed_sync_node(join_from, &value) == pcmk_rc_ok) { crm_err("Rejecting join-%d request from node %s because we failed to " "sync its CIB in join-%d " QB_XS " ref=%s", join_id, join_from, value, ref); ack_nack_bool = FALSE; } else if (!pcmk__cluster_is_node_active(join_node)) { if (match_down_event(join_from) != NULL) { /* The join request was received after the node was fenced or * otherwise shutdown in a way that we're aware of. No need to log * an error in this rare occurrence; we know the client was recently * shut down, and receiving a lingering in-flight request is not * cause for alarm. 
*/ crm_debug("Rejecting join-%d request from inactive node %s " QB_XS " ref=%s", join_id, join_from, ref); } else { crm_err("Rejecting join-%d request from inactive node %s " QB_XS " ref=%s", join_id, join_from, ref); } ack_nack_bool = FALSE; } else if (generation == NULL) { crm_err("Rejecting invalid join-%d request from node %s " "missing CIB generation " QB_XS " ref=%s", join_id, join_from, ref); ack_nack_bool = FALSE; } else if ((join_version == NULL) || !feature_set_compatible(CRM_FEATURE_SET, join_version)) { crm_err("Rejecting join-%d request from node %s because feature set %s" " is incompatible with ours (%s) " QB_XS " ref=%s", join_id, join_from, (join_version? join_version : "pre-3.1.0"), CRM_FEATURE_SET, ref); ack_nack_bool = FALSE; } else if (max_generation_xml == NULL) { const char *validation = pcmk__xe_get(generation, PCMK_XA_VALIDATE_WITH); if (pcmk__get_schema(validation) == NULL) { crm_err("Rejecting join-%d request from %s (with first CIB " "generation) due to %s schema version %s " QB_XS " ref=%s", join_id, join_from, ((validation == NULL)? "missing" : "unknown"), pcmk__s(validation, ""), ref); ack_nack_bool = FALSE; } else { crm_debug("Accepting join-%d request from %s (with first CIB " "generation) " QB_XS " ref=%s", join_id, join_from, ref); max_generation_xml = pcmk__xml_copy(NULL, generation); pcmk__str_update(&max_generation_from, join_from); } } else if ((cmp < 0) || ((cmp == 0) && controld_is_local_node(join_from))) { const char *validation = pcmk__xe_get(generation, PCMK_XA_VALIDATE_WITH); if (pcmk__get_schema(validation) == NULL) { crm_err("Rejecting join-%d request from %s (with better CIB " "generation than current best from %s) due to %s " "schema version %s " QB_XS " ref=%s", join_id, join_from, max_generation_from, ((validation == NULL)? 
"missing" : "unknown"), pcmk__s(validation, ""), ref); ack_nack_bool = FALSE; } else { crm_debug("Accepting join-%d request from %s (with better CIB " "generation than current best from %s) " QB_XS " ref=%s", join_id, join_from, max_generation_from, ref); crm_log_xml_debug(max_generation_xml, "Old max generation"); crm_log_xml_debug(generation, "New max generation"); pcmk__xml_free(max_generation_xml); max_generation_xml = pcmk__xml_copy(NULL, join_ack->xml); pcmk__str_update(&max_generation_from, join_from); } } else { crm_debug("Accepting join-%d request from %s " QB_XS " ref=%s", join_id, join_from, ref); } if (!ack_nack_bool) { crm_update_peer_join(__func__, join_node, controld_join_nack); pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_NACK); } else { crm_update_peer_join(__func__, join_node, controld_join_integrated); pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER); } count = crmd_join_phase_count(controld_join_integrated); crm_debug("%d node%s currently integrated in join-%d", count, pcmk__plural_s(count), join_id); if (check_join_state(cur_state, __func__) == FALSE) { // Don't waste time by invoking the scheduler yet count = crmd_join_phase_count(controld_join_welcomed); crm_debug("Waiting on join-%d requests from %d outstanding node%s", join_id, count, pcmk__plural_s(count)); } } /* A_DC_JOIN_FINALIZE */ void do_dc_join_finalize(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { char *sync_from = NULL; int rc = pcmk_ok; int count_welcomed = crmd_join_phase_count(controld_join_welcomed); int count_finalizable = crmd_join_phase_count(controld_join_integrated) + crmd_join_phase_count(controld_join_nack); /* This we can do straight away and avoid clients timing us out * while we compute the latest CIB */ if (count_welcomed != 0) { crm_debug("Waiting on join-%d requests from %d outstanding node%s " "before finalizing join", current_join_id, count_welcomed, pcmk__plural_s(count_welcomed)); crmd_join_phase_log(LOG_DEBUG); /* crmd_fsa_stall(FALSE); Needed? 
*/ return; } else if (count_finalizable == 0) { crm_debug("Finalization not needed for join-%d at the current time", current_join_id); crmd_join_phase_log(LOG_DEBUG); check_join_state(controld_globals.fsa_state, __func__); return; } controld_clear_fsa_input_flags(R_HAVE_CIB); if ((max_generation_from == NULL) || controld_is_local_node(max_generation_from)) { controld_set_fsa_input_flags(R_HAVE_CIB); } if (!controld_globals.transition_graph->complete) { crm_warn("Delaying join-%d finalization while transition in progress", current_join_id); crmd_join_phase_log(LOG_DEBUG); crmd_fsa_stall(FALSE); return; } if (pcmk__is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { // Send our CIB out to everyone sync_from = pcmk__str_copy(controld_globals.cluster->priv->node_name); } else { // Ask for the agreed best CIB sync_from = pcmk__str_copy(max_generation_from); } crm_notice("Finalizing join-%d for %d node%s (sync'ing CIB %s.%s.%s " "with schema %s and feature set %s from %s)", current_join_id, count_finalizable, pcmk__plural_s(count_finalizable), pcmk__xe_get(max_generation_xml, PCMK_XA_ADMIN_EPOCH), pcmk__xe_get(max_generation_xml, PCMK_XA_EPOCH), pcmk__xe_get(max_generation_xml, PCMK_XA_NUM_UPDATES), pcmk__xe_get(max_generation_xml, PCMK_XA_VALIDATE_WITH), pcmk__xe_get(max_generation_xml, PCMK_XA_CRM_FEATURE_SET), sync_from); crmd_join_phase_log(LOG_DEBUG); rc = controld_globals.cib_conn->cmds->sync_from(controld_globals.cib_conn, sync_from, NULL, cib_none); fsa_register_cib_callback(rc, sync_from, finalize_sync_callback); } void free_max_generation(void) { free(max_generation_from); max_generation_from = NULL; pcmk__xml_free(max_generation_xml); max_generation_xml = NULL; } void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { CRM_LOG_ASSERT(-EPERM != rc); if (rc != pcmk_ok) { const char *sync_from = (const char *) user_data; do_crm_log(((rc == -pcmk_err_old_data)? 
LOG_WARNING : LOG_ERR), "Could not sync CIB from %s in join-%d: %s", sync_from, current_join_id, pcmk_strerror(rc)); if (rc != -pcmk_err_old_data) { record_failed_sync_node(sync_from, current_join_id); } /* restart the whole join process */ register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION_DC, NULL, NULL, __func__); } else if (!AM_I_DC) { crm_debug("Sync'ed CIB for join-%d but no longer DC", current_join_id); } else if (controld_globals.fsa_state != S_FINALIZE_JOIN) { crm_debug("Sync'ed CIB for join-%d but no longer in S_FINALIZE_JOIN " "(%s)", current_join_id, fsa_state2string(controld_globals.fsa_state)); } else { controld_set_fsa_input_flags(R_HAVE_CIB); /* make sure dc_uuid is re-set to us */ if (!check_join_state(controld_globals.fsa_state, __func__)) { int count_finalizable = 0; count_finalizable = crmd_join_phase_count(controld_join_integrated) + crmd_join_phase_count(controld_join_nack); crm_debug("Notifying %d node%s of join-%d results", count_finalizable, pcmk__plural_s(count_finalizable), current_join_id); g_hash_table_foreach(pcmk__peer_cache, finalize_join_for, NULL); } } } static void join_node_state_commit_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { const char *node = user_data; if (rc != pcmk_ok) { fsa_data_t *msg_data = NULL; // for register_fsa_error() macro crm_crit("join-%d node history update (via CIB call %d) for node %s " "failed: %s", current_join_id, call_id, node, pcmk_strerror(rc)); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } crm_debug("join-%d node history update (via CIB call %d) for node %s " "complete", current_join_id, call_id, node); check_join_state(controld_globals.fsa_state, __func__); } /* A_DC_JOIN_PROCESS_ACK */ void do_dc_join_ack(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { int join_id = -1; ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg); const char *op = pcmk__xe_get(join_ack->msg, PCMK__XA_CRM_TASK); char *join_from = pcmk__xe_get_copy(join_ack->msg, PCMK__XA_SRC); pcmk__node_status_t *peer = NULL; enum controld_join_phase phase = controld_join_none; enum controld_section_e section = controld_section_lrm; char *xpath = NULL; xmlNode *state = join_ack->xml; xmlNode *execd_state = NULL; cib_t *cib = controld_globals.cib_conn; int rc = pcmk_ok; // Sanity checks if (join_from == NULL) { crm_warn("Ignoring message received without node identification"); goto done; } if (op == NULL) { crm_warn("Ignoring message received from %s without task", join_from); goto done; } if (strcmp(op, CRM_OP_JOIN_CONFIRM)) { crm_debug("Ignoring '%s' message from %s while waiting for '%s'", op, join_from, CRM_OP_JOIN_CONFIRM); goto done; } if (pcmk__xe_get_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id) != pcmk_rc_ok) { crm_warn("Ignoring join confirmation from %s without valid join ID", join_from); goto done; } peer = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster_member); phase = controld_get_join_phase(peer); if (phase != controld_join_finalized) { crm_info("Ignoring out-of-sequence join-%d confirmation from %s " "(currently %s not %s)", join_id, join_from, join_phase_text(phase), join_phase_text(controld_join_finalized)); goto done; } if (join_id != current_join_id) { crm_err("Rejecting join-%d confirmation from %s " "because currently on join-%d", join_id, join_from, current_join_id); crm_update_peer_join(__func__, peer, controld_join_nack); goto done; } 
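
/* For illustration: the history update below batches its CIB requests into a
 * single atomic transaction. Based only on the calls used in this function,
 * the general shape is (sketch, error handling omitted):
 *
 *     cib_t *cib = controld_globals.cib_conn;
 *
 *     cib->cmds->init_transaction(cib);                // open a transaction
 *     cib->cmds->remove(cib, xpath, NULL,
 *                       cib_xpath|cib_multiple|cib_transaction);
 *     cib->cmds->modify(cib, PCMK_XE_STATUS, state,
 *                       cib_can_create|cib_transaction);
 *     cib->cmds->end_transaction(cib, true, cib_none); // commit the batch
 *
 * Requests flagged with cib_transaction are queued rather than applied
 * immediately, so the deletion and the status update take effect together when
 * end_transaction() commits them.
 */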
crm_update_peer_join(__func__, peer, controld_join_confirmed); /* Update CIB with node's current executor state. A new transition will be * triggered later, when the CIB manager notifies us of the change. * * The delete and modify requests are part of an atomic transaction. */ rc = cib->cmds->init_transaction(cib); if (rc != pcmk_ok) { goto done; } // Delete relevant parts of node's current executor state from CIB if (pcmk__is_set(controld_globals.flags, controld_shutdown_lock_enabled)) { section = controld_section_lrm_unlocked; } controld_node_state_deletion_strings(join_from, section, &xpath, NULL); rc = cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_multiple|cib_transaction); if (rc != pcmk_ok) { goto done; } // Update CIB with node's latest known executor state if (controld_is_local_node(join_from)) { // Use the latest possible state if processing our own join ack execd_state = controld_query_executor_state(); if (execd_state != NULL) { crm_debug("Updating local node history for join-%d from query " "result", current_join_id); state = execd_state; } else { crm_warn("Updating local node history from join-%d confirmation " "because query failed", current_join_id); } } else { crm_debug("Updating node history for %s from join-%d confirmation", join_from, current_join_id); } rc = cib->cmds->modify(cib, PCMK_XE_STATUS, state, cib_can_create|cib_transaction); pcmk__xml_free(execd_state); if (rc != pcmk_ok) { goto done; } // Commit the transaction rc = cib->cmds->end_transaction(cib, true, cib_none); fsa_register_cib_callback(rc, join_from, join_node_state_commit_callback); if (rc > 0) { // join_from will be freed after callback join_from = NULL; rc = pcmk_ok; } done: if (rc != pcmk_ok) { crm_crit("join-%d node history update for node %s failed: %s", current_join_id, join_from, pcmk_strerror(rc)); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } free(join_from); free(xpath); } void finalize_join_for(gpointer key, gpointer value, gpointer user_data) { xmlNode *acknak = NULL; xmlNode *tmp1 = NULL; pcmk__node_status_t *join_node = value; const char *join_to = join_node->name; enum controld_join_phase phase = controld_get_join_phase(join_node); bool integrated = false; switch (phase) { case controld_join_integrated: integrated = true; break; case controld_join_nack: break; default: crm_trace("Not updating non-integrated and non-nacked node %s (%s) " "for join-%d", join_to, join_phase_text(phase), current_join_id); return; } /* Update the element with the node's name and UUID, in case they * weren't known before */ crm_trace("Updating node name and UUID in CIB for %s", join_to); tmp1 = pcmk__xe_create(NULL, PCMK_XE_NODE); pcmk__xe_set(tmp1, PCMK_XA_ID, pcmk__cluster_get_xml_id(join_node)); pcmk__xe_set(tmp1, PCMK_XA_UNAME, join_to); fsa_cib_anon_update(PCMK_XE_NODES, tmp1); pcmk__xml_free(tmp1); join_node = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster_member); if (!pcmk__cluster_is_node_active(join_node)) { /* * NACK'ing nodes that the membership layer doesn't know about yet * simply creates more churn * * Better to leave them waiting and let the join restart when * the new membership event comes in * * All other NACKs (due to versions etc) should still be processed */ pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_PENDING); return; } // Acknowledge or nack node's join request crm_debug("%sing join-%d request from %s", integrated? 
"Acknowledg" : "Nack", current_join_id, join_to); acknak = create_dc_message(CRM_OP_JOIN_ACKNAK, join_to); - pcmk__xe_set_bool_attr(acknak, CRM_OP_JOIN_ACKNAK, integrated); + pcmk__xe_set_bool(acknak, CRM_OP_JOIN_ACKNAK, integrated); if (integrated) { // No change needed for a nacked node crm_update_peer_join(__func__, join_node, controld_join_finalized); pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER); /* Iterate through the remote peer cache and add information on which * node hosts each to the ACK message. This keeps new controllers in * sync with what has already happened. */ if (pcmk__cluster_num_remote_nodes() > 0) { GHashTableIter iter; pcmk__node_status_t *node = NULL; xmlNode *remotes = pcmk__xe_create(acknak, PCMK_XE_NODES); g_hash_table_iter_init(&iter, pcmk__remote_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { xmlNode *remote = NULL; if (!node->conn_host) { continue; } remote = pcmk__xe_create(remotes, PCMK_XE_NODE); pcmk__xe_set_props(remote, PCMK_XA_ID, node->name, PCMK__XA_NODE_STATE, node->state, PCMK__XA_CONNECTION_HOST, node->conn_host, NULL); } } } pcmk__cluster_send_message(join_node, pcmk_ipc_controld, acknak); pcmk__xml_free(acknak); return; } gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source) { static unsigned long long highest_seq = 0; if (controld_globals.membership_id != controld_globals.peer_seq) { crm_debug("join-%d: Membership changed from %llu to %llu " QB_XS " highest=%llu state=%s for=%s", current_join_id, controld_globals.membership_id, controld_globals.peer_seq, highest_seq, fsa_state2string(cur_state), source); if (highest_seq < controld_globals.peer_seq) { /* Don't spam the FSA with duplicates */ highest_seq = controld_globals.peer_seq; register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL); } } else if (cur_state == S_INTEGRATION) { if (crmd_join_phase_count(controld_join_welcomed) == 0) { int count = crmd_join_phase_count(controld_join_integrated); crm_debug("join-%d: Integration of %d peer%s complete " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); register_fsa_input_before(C_FSA_INTERNAL, I_INTEGRATED, NULL); return TRUE; } } else if (cur_state == S_FINALIZE_JOIN) { if (!pcmk__is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { crm_debug("join-%d: Delaying finalization until we have CIB " QB_XS " state=%s for=%s", current_join_id, fsa_state2string(cur_state), source); return TRUE; } else if (crmd_join_phase_count(controld_join_welcomed) != 0) { int count = crmd_join_phase_count(controld_join_welcomed); crm_debug("join-%d: Still waiting on %d welcomed node%s " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); crmd_join_phase_log(LOG_DEBUG); } else if (crmd_join_phase_count(controld_join_integrated) != 0) { int count = crmd_join_phase_count(controld_join_integrated); crm_debug("join-%d: Still waiting on %d integrated node%s " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); crmd_join_phase_log(LOG_DEBUG); } else if (crmd_join_phase_count(controld_join_finalized) != 0) { int count = crmd_join_phase_count(controld_join_finalized); crm_debug("join-%d: Still waiting on %d finalized node%s " QB_XS " state=%s for=%s", current_join_id, count, pcmk__plural_s(count), fsa_state2string(cur_state), source); crmd_join_phase_log(LOG_DEBUG); } else { crm_debug("join-%d: Complete " QB_XS " 
state=%s for=%s", current_join_id, fsa_state2string(cur_state), source); register_fsa_input_later(C_FSA_INTERNAL, I_FINALIZED, NULL); return TRUE; } } return FALSE; } void do_dc_join_final(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { crm_debug("Ensuring DC, quorum and node attributes are up-to-date"); crm_update_quorum(pcmk__cluster_has_quorum(), TRUE); } int crmd_join_phase_count(enum controld_join_phase phase) { int count = 0; pcmk__node_status_t *peer; GHashTableIter iter; g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { if (controld_get_join_phase(peer) == phase) { count++; } } return count; } void crmd_join_phase_log(int level) { pcmk__node_status_t *peer; GHashTableIter iter; g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) { do_crm_log(level, "join-%d: %s=%s", current_join_id, peer->name, join_phase_text(controld_get_join_phase(peer))); } } diff --git a/daemons/controld/controld_membership.c b/daemons/controld/controld_membership.c index 7b3d4552bd..9e354bcda4 100644 --- a/daemons/controld/controld_membership.c +++ b/daemons/controld/controld_membership.c @@ -1,496 +1,496 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ /* put these first so that uuid_t is defined without conflicts */ #include #include // uint32_t #include #include #include #include #include #include void post_cache_update(int instance); extern gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source); static void reap_dead_nodes(gpointer key, gpointer value, gpointer user_data) { pcmk__node_status_t *node = value; if (pcmk__cluster_is_node_active(node)) { return; } crm_update_peer_join(__func__, node, controld_join_none); if ((node != NULL) && (node->name != NULL)) { if (controld_is_local_node(node->name)) { crm_err("We're not part of the cluster anymore"); register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL); } else if (!AM_I_DC && pcmk__str_eq(node->name, controld_globals.dc_name, pcmk__str_casei)) { crm_warn("Our DC node (%s) left the cluster", node->name); register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL); } } if ((controld_globals.fsa_state == S_INTEGRATION) || (controld_globals.fsa_state == S_FINALIZE_JOIN)) { check_join_state(controld_globals.fsa_state, __func__); } if ((node != NULL) && (node->xml_id != NULL)) { fail_incompletable_actions(controld_globals.transition_graph, node->xml_id); } } void post_cache_update(int instance) { xmlNode *no_op = NULL; controld_globals.peer_seq = instance; crm_debug("Updated cache after membership event %d.", instance); g_hash_table_foreach(pcmk__peer_cache, reap_dead_nodes, NULL); controld_set_fsa_input_flags(R_MEMBERSHIP); if (AM_I_DC) { populate_cib_nodes(controld_node_update_quick |controld_node_update_cluster |controld_node_update_peer |controld_node_update_expected, __func__); } /* * If we lost nodes, we should re-check the election status * Safe to call outside of an election */ controld_set_fsa_action_flags(A_ELECTION_CHECK); controld_trigger_fsa(); /* Membership changed, remind everyone we're here. * This will aid detection of duplicate DCs */ no_op = pcmk__new_request(pcmk_ipc_controld, (AM_I_DC? 
CRM_SYSTEM_DC : CRM_SYSTEM_CRMD), NULL, CRM_SYSTEM_CRMD, CRM_OP_NOOP, NULL); pcmk__cluster_send_message(NULL, pcmk_ipc_controld, no_op); pcmk__xml_free(no_op); } static void crmd_node_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { fsa_data_t *msg_data = NULL; if (rc == pcmk_ok) { crm_trace("Node update %d complete", call_id); } else if(call_id < pcmk_ok) { crm_err("Node update failed: %s (%d)", pcmk_strerror(call_id), call_id); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } else { crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } /*! * \internal * \brief Create an XML node state tag with updates * * \param[in,out] node Node whose state will be used for update * \param[in] flags Group of enum controld_node_update_flags * \param[in,out] parent XML node to contain update (or NULL) * \param[in] source Who requested the update (only used for logging) * * \return Pointer to created node state tag */ xmlNode * create_node_state_update(pcmk__node_status_t *node, uint32_t flags, xmlNode *parent, const char *source) { // @TODO Ensure all callers handle NULL returns const char *id = NULL; const char *value = NULL; xmlNode *node_state; if (!node->state) { crm_info("Node update for %s cancelled: no state, not seen yet", node->name); return NULL; } node_state = pcmk__xe_create(parent, PCMK__XE_NODE_STATE); if (pcmk__is_set(node->flags, pcmk__node_status_remote)) { - pcmk__xe_set_bool_attr(node_state, PCMK_XA_REMOTE_NODE, true); + pcmk__xe_set_bool(node_state, PCMK_XA_REMOTE_NODE, true); } id = pcmk__cluster_get_xml_id(node); if ((id == NULL) || (pcmk__xe_set(node_state, PCMK_XA_ID, id) != pcmk_rc_ok)) { crm_info("Node update for %s cancelled: no ID", node->name); pcmk__xml_free(node_state); return NULL; } pcmk__xe_set(node_state, PCMK_XA_UNAME, node->name); if (pcmk__is_set(flags, controld_node_update_cluster)) { if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) { // A value 0 means the node is not a cluster member. pcmk__xe_set_ll(node_state, PCMK__XA_IN_CCM, node->when_member); } else { - pcmk__xe_set_bool_attr(node_state, PCMK__XA_IN_CCM, - pcmk__str_eq(node->state, PCMK_VALUE_MEMBER, - pcmk__str_none)); + pcmk__xe_set_bool(node_state, PCMK__XA_IN_CCM, + pcmk__str_eq(node->state, PCMK_VALUE_MEMBER, + pcmk__str_none)); } } if (!pcmk__is_set(node->flags, pcmk__node_status_remote)) { if (pcmk__is_set(flags, controld_node_update_peer)) { if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) { // A value 0 means the peer is offline in CPG. 
pcmk__xe_set_ll(node_state, PCMK_XA_CRMD, node->when_online); } else { // @COMPAT DCs < 2.1.7 use online/offline rather than timestamp value = PCMK_VALUE_OFFLINE; if (pcmk__is_set(node->processes, crm_get_cluster_proc())) { value = PCMK_VALUE_ONLINE; } pcmk__xe_set(node_state, PCMK_XA_CRMD, value); } } if (pcmk__is_set(flags, controld_node_update_join)) { if (controld_get_join_phase(node) <= controld_join_none) { value = CRMD_JOINSTATE_DOWN; } else { value = CRMD_JOINSTATE_MEMBER; } pcmk__xe_set(node_state, PCMK__XA_JOIN, value); } if (pcmk__is_set(flags, controld_node_update_expected)) { pcmk__xe_set(node_state, PCMK_XA_EXPECTED, node->expected); } } pcmk__xe_set(node_state, PCMK_XA_CRM_DEBUG_ORIGIN, source); return node_state; } static void remove_conflicting_node_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *node_uuid = user_data; do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE, "Deletion of the unknown conflicting node \"%s\": %s (rc=%d)", node_uuid, pcmk_strerror(rc), rc); } static void search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *new_node_uuid = user_data; xmlNode *node_xml = NULL; if (rc != pcmk_ok) { if (rc != -ENXIO) { crm_notice("Searching conflicting nodes for %s failed: %s (%d)", new_node_uuid, pcmk_strerror(rc), rc); } return; } else if (output == NULL) { return; } if (pcmk__xe_is(output, PCMK_XE_NODE)) { node_xml = output; } else { node_xml = pcmk__xe_first_child(output, PCMK_XE_NODE, NULL, NULL); } for (; node_xml != NULL; node_xml = pcmk__xe_next(node_xml, PCMK_XE_NODE)) { const char *node_uuid = NULL; const char *node_uname = NULL; GHashTableIter iter; pcmk__node_status_t *node = NULL; gboolean known = FALSE; node_uuid = pcmk__xe_get(node_xml, PCMK_XA_ID); node_uname = pcmk__xe_get(node_xml, PCMK_XA_UNAME); if (node_uuid == NULL || node_uname == NULL) { continue; } g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { if ((node != NULL) && pcmk__str_eq(node->xml_id, node_uuid, pcmk__str_casei) && pcmk__str_eq(node->name, node_uname, pcmk__str_casei)) { known = TRUE; break; } } if (known == FALSE) { cib_t *cib_conn = controld_globals.cib_conn; int delete_call_id = 0; xmlNode *node_state_xml = NULL; crm_notice("Deleting unknown node %s/%s which has conflicting uname with %s", node_uuid, node_uname, new_node_uuid); delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_NODES, node_xml, cib_none); fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid), remove_conflicting_node_callback); node_state_xml = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE); pcmk__xe_set(node_state_xml, PCMK_XA_ID, node_uuid); pcmk__xe_set(node_state_xml, PCMK_XA_UNAME, node_uname); delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_STATUS, node_state_xml, cib_none); fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid), remove_conflicting_node_callback); pcmk__xml_free(node_state_xml); } } } /*! * \internal * \brief Populate a \c PCMK_XE_NODES element from the peer cache * * Create a \c PCMK_XE_NODE element for each node in the cache. 
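 *
 * For each cached node, this also queries the CIB for any configured node
 * entry that has the same uname but a different ID, and schedules deletion of
 * such conflicting entries and their status sections (see
 * search_conflicting_node_callback()).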
* * \param[in,out] nodes_xml \c PCMK_XE_NODES element to populate */ static void populate_cib_nodes_from_cache(xmlNode *nodes_xml) { GString *xpath = NULL; GHashTableIter iter; pcmk__node_status_t *node = NULL; g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { cib_t *cib_conn = controld_globals.cib_conn; int call_id = 0; xmlNode *new_node = NULL; if ((node->xml_id == NULL) || (node->name == NULL)) { // We need both in order for the node to be valid continue; } crm_trace("Creating node entry for %s/%s", node->name, node->xml_id); new_node = pcmk__xe_create(nodes_xml, PCMK_XE_NODE); pcmk__xe_set(new_node, PCMK_XA_ID, node->xml_id); pcmk__xe_set(new_node, PCMK_XA_UNAME, node->name); if (xpath == NULL) { xpath = g_string_sized_new(512); } else { g_string_truncate(xpath, 0); } // Search and remove unknown nodes with the conflicting uname from CIB pcmk__g_strcat(xpath, "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_NODES "/" PCMK_XE_NODE "[@" PCMK_XA_UNAME "='", node->name, "']" "[@" PCMK_XA_ID "!='", node->xml_id, "']", NULL); call_id = cib_conn->cmds->query(cib_conn, xpath->str, NULL, cib_xpath); fsa_register_cib_callback(call_id, pcmk__str_copy(node->xml_id), search_conflicting_node_callback); } if (xpath != NULL) { g_string_free(xpath, TRUE); } } static void node_list_update_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { fsa_data_t *msg_data = NULL; if(call_id < pcmk_ok) { crm_err("Node list update failed: %s (%d)", pcmk_strerror(call_id), call_id); crm_log_xml_debug(msg, "update:failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } else if(rc < pcmk_ok) { crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc); crm_log_xml_debug(msg, "update:failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } void populate_cib_nodes(uint32_t flags, const char *source) { bool from_cache = true; xmlNode *node_list = pcmk__xe_create(NULL, PCMK_XE_NODES); GHashTableIter iter; pcmk__node_status_t *node = NULL; #if SUPPORT_COROSYNC if (!pcmk__is_set(flags, controld_node_update_quick) && (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync)) { from_cache = !pcmk__corosync_add_nodes(node_list); } #endif crm_trace("Populating <" PCMK_XE_NODES "> section of CIB from %s", (from_cache? "peer cache" : "cluster")); if (from_cache) { populate_cib_nodes_from_cache(node_list); } if (controld_update_cib(PCMK_XE_NODES, node_list, cib_none, node_list_update_callback) != pcmk_rc_ok) { // Callback logs an error goto done; } if (pcmk__peer_cache == NULL) { // We don't have the necessary info to update the CIB's status section goto done; } if (!AM_I_DC) { // Only the DC populates the status section (@TODO Why only status?) 
goto done; } pcmk__xml_free(node_list); node_list = pcmk__xe_create(NULL, PCMK_XE_STATUS); g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { create_node_state_update(node, flags, node_list, source); } if (pcmk__remote_peer_cache != NULL) { g_hash_table_iter_init(&iter, pcmk__remote_peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) { create_node_state_update(node, flags, node_list, source); } } controld_update_cib(PCMK_XE_STATUS, node_list, cib_none, crmd_node_update_complete); done: pcmk__xml_free(node_list); } static void cib_quorum_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { fsa_data_t *msg_data = NULL; if (rc == pcmk_ok) { crm_trace("Quorum update %d complete", call_id); } else { crm_err("Quorum update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc); crm_log_xml_debug(msg, "failed"); register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL); } } void crm_update_quorum(gboolean quorum, gboolean force_update) { bool has_quorum = pcmk__is_set(controld_globals.flags, controld_has_quorum); if (quorum) { controld_set_global_flags(controld_ever_had_quorum); } else if (pcmk__all_flags_set(controld_globals.flags, controld_ever_had_quorum |controld_no_quorum_panic)) { pcmk__panic("Quorum lost"); } if (AM_I_DC && ((has_quorum && !quorum) || (!has_quorum && quorum) || force_update)) { xmlNode *update = NULL; update = pcmk__xe_create(NULL, PCMK_XE_CIB); pcmk__xe_set_int(update, PCMK_XA_HAVE_QUORUM, quorum); pcmk__xe_set(update, PCMK_XA_DC_UUID, controld_globals.our_uuid); crm_debug("Updating quorum status to %s", pcmk__btoa(quorum)); controld_update_cib(PCMK_XE_CIB, update, cib_none, cib_quorum_update_complete); pcmk__xml_free(update); /* Quorum changes usually cause a new transition via other activity: * quorum gained via a node joining will abort via the node join, * and quorum lost via a node leaving will usually abort via resource * activity and/or fencing. * * However, it is possible that nothing else causes a transition (e.g. * someone forces quorum via corosync-cmaptcl, or quorum is lost due to * a node in standby shutting down cleanly), so here ensure a new * transition is triggered. */ if (quorum) { /* If quorum was gained, abort after a short delay, in case multiple * nodes are joining around the same time, so the one that brings us * to quorum doesn't cause all the remaining ones to be fenced. */ abort_after_delay(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Quorum gained", 5000); } else { abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Quorum lost", NULL); } } if (quorum) { controld_set_global_flags(controld_has_quorum); } else { controld_clear_global_flags(controld_has_quorum); } } diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c index 02fb8fae67..4667b7fe08 100644 --- a/daemons/controld/controld_messages.c +++ b/daemons/controld/controld_messages.c @@ -1,1347 +1,1346 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. 
*/ #include #include // PRIx64 #include // uint64_t #include #include #include #include #include #include #include #include #include static enum crmd_fsa_input handle_message(xmlNode *msg, enum crmd_fsa_cause cause); static xmlNode* create_ping_reply(const xmlNode *msg); static void handle_response(xmlNode *stored_msg); static enum crmd_fsa_input handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause); static enum crmd_fsa_input handle_shutdown_request(xmlNode *stored_msg); static void send_msg_via_ipc(xmlNode * msg, const char *sys, const char *src); /* debug only, can wrap all it likes */ static int last_data_id = 0; void register_fsa_error_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input, fsa_data_t * cur_data, void *new_data, const char *raised_from) { /* save the current actions if any */ if (controld_globals.fsa_actions != A_NOTHING) { register_fsa_input_adv(cur_data ? cur_data->fsa_cause : C_FSA_INTERNAL, I_NULL, cur_data ? cur_data->data : NULL, controld_globals.fsa_actions, TRUE, __func__); } /* reset the action list */ crm_info("Resetting the current action list"); fsa_dump_actions(controld_globals.fsa_actions, "Drop"); controld_globals.fsa_actions = A_NOTHING; /* register the error */ register_fsa_input_adv(cause, input, new_data, A_NOTHING, TRUE, raised_from); } void register_fsa_input_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input, void *data, uint64_t with_actions, gboolean prepend, const char *raised_from) { fsa_data_t *fsa_data = NULL; if (raised_from == NULL) { raised_from = ""; } if (input == I_NULL && with_actions == A_NOTHING /* && data == NULL */ ) { /* no point doing anything */ crm_err("Cannot add entry to queue: no input and no action"); return; } if (input == I_WAIT_FOR_EVENT) { controld_set_global_flags(controld_fsa_is_stalled); crm_debug("Stalling the FSA pending further input: " "source=%s cause=%s data=%p queue=%u", raised_from, fsa_cause2string(cause), data, controld_fsa_message_queue_length()); if (data == NULL) { controld_set_fsa_action_flags(with_actions); fsa_dump_actions(with_actions, "Restored"); return; } prepend = FALSE; /* Store everything in the new event and reset * controld_globals.fsa_actions */ with_actions |= controld_globals.fsa_actions; controld_globals.fsa_actions = A_NOTHING; } last_data_id++; crm_trace("%s %s FSA input %d (%s) due to %s, %s data", raised_from, (prepend? "prepended" : "appended"), last_data_id, fsa_input2string(input), fsa_cause2string(cause), (data? 
"with" : "without")); fsa_data = pcmk__assert_alloc(1, sizeof(fsa_data_t)); fsa_data->id = last_data_id; fsa_data->fsa_input = input; fsa_data->fsa_cause = cause; fsa_data->origin = raised_from; fsa_data->data = NULL; fsa_data->data_type = fsa_dt_none; fsa_data->actions = with_actions; if (with_actions != A_NOTHING) { crm_trace("Adding actions %.16" PRIx64 " to input", with_actions); } if (data != NULL) { switch (cause) { case C_FSA_INTERNAL: case C_CRMD_STATUS_CALLBACK: case C_IPC_MESSAGE: case C_HA_MESSAGE: CRM_CHECK(((ha_msg_input_t *) data)->msg != NULL, crm_err("Bogus data from %s", raised_from)); crm_trace("Copying %s data from %s as cluster message data", fsa_cause2string(cause), raised_from); fsa_data->data = copy_ha_msg_input(data); fsa_data->data_type = fsa_dt_ha_msg; break; case C_LRM_OP_CALLBACK: crm_trace("Copying %s data from %s as lrmd_event_data_t", fsa_cause2string(cause), raised_from); fsa_data->data = lrmd_copy_event((lrmd_event_data_t *) data); fsa_data->data_type = fsa_dt_lrm; break; case C_TIMER_POPPED: case C_SHUTDOWN: case C_UNKNOWN: case C_STARTUP: crm_crit("Copying %s data (from %s) is not yet implemented", fsa_cause2string(cause), raised_from); crmd_exit(CRM_EX_SOFTWARE); break; } } if (controld_globals.fsa_message_queue == NULL) { controld_globals.fsa_message_queue = g_queue_new(); } if (prepend) { g_queue_push_head(controld_globals.fsa_message_queue, fsa_data); } else { g_queue_push_tail(controld_globals.fsa_message_queue, fsa_data); } crm_trace("FSA message queue length is %u", controld_fsa_message_queue_length()); if (input != I_WAIT_FOR_EVENT) { controld_trigger_fsa(); } } ha_msg_input_t * copy_ha_msg_input(ha_msg_input_t * orig) { xmlNode *wrapper = NULL; ha_msg_input_t *copy = pcmk__assert_alloc(1, sizeof(ha_msg_input_t)); copy->msg = (orig != NULL)? pcmk__xml_copy(NULL, orig->msg) : NULL; wrapper = pcmk__xe_first_child(copy->msg, PCMK__XE_CRM_XML, NULL, NULL); copy->xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); return copy; } void delete_fsa_input(fsa_data_t * fsa_data) { lrmd_event_data_t *op = NULL; xmlNode *foo = NULL; if (fsa_data == NULL) { return; } crm_trace("About to free %s data", fsa_cause2string(fsa_data->fsa_cause)); if (fsa_data->data != NULL) { switch (fsa_data->data_type) { case fsa_dt_ha_msg: delete_ha_msg_input(fsa_data->data); break; case fsa_dt_xml: foo = fsa_data->data; pcmk__xml_free(foo); break; case fsa_dt_lrm: op = (lrmd_event_data_t *) fsa_data->data; lrmd_free_event(op); break; case fsa_dt_none: if (fsa_data->data != NULL) { crm_err("Don't know how to free %s data from %s", fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin); crmd_exit(CRM_EX_SOFTWARE); } break; } crm_trace("%s data freed", fsa_cause2string(fsa_data->fsa_cause)); } free(fsa_data); } void * fsa_typed_data_adv(fsa_data_t * fsa_data, enum fsa_data_type a_type, const char *caller) { void *ret_val = NULL; if (fsa_data == NULL) { crm_err("%s: No FSA data available", caller); } else if (fsa_data->data == NULL) { crm_err("%s: No message data available. Origin: %s", caller, fsa_data->origin); } else if (fsa_data->data_type != a_type) { crm_crit("%s: Message data was the wrong type! %d vs. requested=%d. 
Origin: %s", caller, fsa_data->data_type, a_type, fsa_data->origin); pcmk__assert(fsa_data->data_type == a_type); } else { ret_val = fsa_data->data; } return ret_val; } /* A_MSG_ROUTE */ void do_msg_route(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg); route_message(msg_data->fsa_cause, input->msg); } void route_message(enum crmd_fsa_cause cause, xmlNode * input) { ha_msg_input_t fsa_input; enum crmd_fsa_input result = I_NULL; fsa_input.msg = input; CRM_CHECK(cause == C_IPC_MESSAGE || cause == C_HA_MESSAGE, return); /* try passing the buck first */ if (relay_message(input, cause == C_IPC_MESSAGE)) { return; } /* handle locally */ result = handle_message(input, cause); /* done or process later? */ switch (result) { case I_NULL: case I_ROUTER: case I_NODE_JOIN: case I_JOIN_REQUEST: case I_JOIN_RESULT: break; default: /* Defering local processing of message */ register_fsa_input_later(cause, result, &fsa_input); return; } if (result != I_NULL) { /* add to the front of the queue */ register_fsa_input(cause, result, &fsa_input); } } gboolean relay_message(xmlNode * msg, gboolean originated_locally) { enum pcmk_ipc_server dest = pcmk_ipc_unknown; bool is_for_dc = false; bool is_for_dcib = false; bool is_for_te = false; bool is_for_crm = false; bool is_for_cib = false; bool is_local = false; bool broadcast = false; const char *host_to = NULL; const char *sys_to = NULL; const char *sys_from = NULL; const char *type = NULL; const char *task = NULL; const char *ref = NULL; pcmk__node_status_t *node_to = NULL; CRM_CHECK(msg != NULL, return TRUE); host_to = pcmk__xe_get(msg, PCMK__XA_CRM_HOST_TO); sys_to = pcmk__xe_get(msg, PCMK__XA_CRM_SYS_TO); sys_from = pcmk__xe_get(msg, PCMK__XA_CRM_SYS_FROM); type = pcmk__xe_get(msg, PCMK__XA_T); task = pcmk__xe_get(msg, PCMK__XA_CRM_TASK); ref = pcmk__xe_get(msg, PCMK_XA_REFERENCE); broadcast = pcmk__str_empty(host_to); if (ref == NULL) { ref = "without reference ID"; } if (pcmk__str_eq(task, CRM_OP_HELLO, pcmk__str_casei)) { crm_trace("Received hello %s from %s (no processing needed)", ref, pcmk__s(sys_from, "unidentified source")); crm_log_xml_trace(msg, "hello"); return TRUE; } // Require message type (set by pcmk__new_request()) if (!pcmk__str_eq(type, PCMK__VALUE_CRMD, pcmk__str_none)) { crm_warn("Ignoring invalid message %s with type '%s' " "(not '" PCMK__VALUE_CRMD "')", ref, pcmk__s(type, "")); crm_log_xml_trace(msg, "ignored"); return TRUE; } // Require a destination subsystem (also set by pcmk__new_request()) if (sys_to == NULL) { crm_warn("Ignoring invalid message %s with no " PCMK__XA_CRM_SYS_TO, ref); crm_log_xml_trace(msg, "ignored"); return TRUE; } // Get the message type appropriate to the destination subsystem if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) { dest = pcmk__parse_server(sys_to); if (dest == pcmk_ipc_unknown) { /* Unrecognized value, use a sane default * * @TODO Maybe we should bail instead */ dest = pcmk_ipc_controld; } } is_for_dc = (strcasecmp(CRM_SYSTEM_DC, sys_to) == 0); is_for_dcib = (strcasecmp(CRM_SYSTEM_DCIB, sys_to) == 0); is_for_te = (strcasecmp(CRM_SYSTEM_TENGINE, sys_to) == 0); is_for_cib = (strcasecmp(CRM_SYSTEM_CIB, sys_to) == 0); is_for_crm = (strcasecmp(CRM_SYSTEM_CRMD, sys_to) == 0); // Check whether message should be processed locally is_local = false; if (broadcast) { if (is_for_dc || is_for_te) { is_local = false; } else if (is_for_crm) { if 
(pcmk__strcase_any_of(task, CRM_OP_NODE_INFO, PCMK__CONTROLD_CMD_NODES, NULL)) { /* Node info requests do not specify a host, which is normally * treated as "all hosts", because the whole point is that the * client may not know the local node name. Always handle these * requests locally. */ is_local = true; } else { is_local = !originated_locally; } } else { is_local = true; } } else if (controld_is_local_node(host_to)) { is_local = true; } else if (is_for_crm && pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) { xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL); xmlNode *msg_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); const char *mode = pcmk__xe_get(msg_data, PCMK__XA_MODE); if (pcmk__str_eq(mode, PCMK__VALUE_CIB, pcmk__str_none)) { // Local delete of an offline node's resource history is_local = true; } } // If is for DC and DC is not yet selected if (is_for_dc && pcmk__str_eq(task, CRM_OP_PING, pcmk__str_casei) && (controld_globals.dc_name == NULL)) { xmlNode *reply = create_ping_reply(msg); sys_to = pcmk__xe_get(reply, PCMK__XA_CRM_SYS_TO); // Explicitly leave src empty. It indicates that dc is "not yet selected" send_msg_via_ipc(reply, sys_to, NULL); pcmk__xml_free(reply); return TRUE; } // Check whether message should be relayed if (is_for_dc || is_for_dcib || is_for_te) { if (AM_I_DC) { if (is_for_te) { crm_trace("Route message %s locally as transition request", ref); crm_log_xml_trace(msg, sys_to); send_msg_via_ipc(msg, sys_to, controld_globals.cluster->priv->node_name); return TRUE; // No further processing of message is needed } crm_trace("Route message %s locally as DC request", ref); return FALSE; // More to be done by caller } if (originated_locally && !pcmk__strcase_any_of(sys_from, CRM_SYSTEM_PENGINE, CRM_SYSTEM_TENGINE, NULL)) { crm_trace("Relay message %s to DC (via %s)", ref, pcmk__s(host_to, "broadcast")); crm_log_xml_trace(msg, "relayed"); if (!broadcast) { node_to = pcmk__get_node(0, host_to, NULL, pcmk__node_search_cluster_member); } pcmk__cluster_send_message(node_to, dest, msg); return TRUE; } /* Transition engine and scheduler messages are sent only to the DC on * the same node. If we are no longer the DC, discard this message. 
*/ crm_trace("Ignoring message %s because we are no longer DC", ref); crm_log_xml_trace(msg, "ignored"); return TRUE; // No further processing of message is needed } if (is_local) { if (is_for_crm || is_for_cib) { crm_trace("Route message %s locally as controller request", ref); return FALSE; // More to be done by caller } crm_trace("Relay message %s locally to %s", ref, sys_to); crm_log_xml_trace(msg, "IPC-relay"); send_msg_via_ipc(msg, sys_to, controld_globals.cluster->priv->node_name); return TRUE; } if (!broadcast) { node_to = pcmk__search_node_caches(0, host_to, NULL, pcmk__node_search_cluster_member); if (node_to == NULL) { crm_warn("Ignoring message %s because node %s is unknown", ref, host_to); crm_log_xml_trace(msg, "ignored"); return TRUE; } } crm_trace("Relay message %s to %s", ref, pcmk__s(host_to, "all peers")); crm_log_xml_trace(msg, "relayed"); pcmk__cluster_send_message(node_to, dest, msg); return TRUE; } // Return true if field contains a positive integer static bool authorize_version(xmlNode *message_data, const char *field, const char *client_name, const char *ref, const char *uuid) { const char *version = pcmk__xe_get(message_data, field); long long version_num; if ((pcmk__scan_ll(version, &version_num, -1LL) != pcmk_rc_ok) || (version_num < 0LL)) { crm_warn("Rejected IPC hello from %s: '%s' is not a valid protocol %s " QB_XS " ref=%s uuid=%s", client_name, ((version == NULL)? "" : version), field, (ref? ref : "none"), uuid); return false; } return true; } /*! * \internal * \brief Check whether a client IPC message is acceptable * * If a given client IPC message is a hello, "authorize" it by ensuring it has * valid information such as a protocol version, and return false indicating * that nothing further needs to be done with the message. If the message is not * a hello, just return true to indicate it needs further processing. * * \param[in] client_msg XML of IPC message * \param[in,out] curr_client If IPC is not proxied, client that sent message * \param[in] proxy_session If IPC is proxied, the session ID * * \return true if message needs further processing, false if it doesn't */ bool controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_client, const char *proxy_session) { xmlNode *wrapper = NULL; xmlNode *message_data = NULL; const char *client_name = NULL; const char *op = pcmk__xe_get(client_msg, PCMK__XA_CRM_TASK); const char *ref = pcmk__xe_get(client_msg, PCMK_XA_REFERENCE); const char *uuid = (curr_client? curr_client->id : proxy_session); if (uuid == NULL) { crm_warn("IPC message from client rejected: No client identifier " QB_XS " ref=%s", (ref? ref : "none")); goto rejected; } if (!pcmk__str_eq(CRM_OP_HELLO, op, pcmk__str_casei)) { // Only hello messages need to be authorized return true; } wrapper = pcmk__xe_first_child(client_msg, PCMK__XE_CRM_XML, NULL, NULL); message_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); client_name = pcmk__xe_get(message_data, PCMK__XA_CLIENT_NAME); if (pcmk__str_empty(client_name)) { crm_warn("IPC hello from client rejected: No client name", QB_XS " ref=%s uuid=%s", (ref? 
ref : "none"), uuid); goto rejected; } if (!authorize_version(message_data, PCMK__XA_MAJOR_VERSION, client_name, ref, uuid)) { goto rejected; } if (!authorize_version(message_data, PCMK__XA_MINOR_VERSION, client_name, ref, uuid)) { goto rejected; } crm_trace("Validated IPC hello from client %s", client_name); crm_log_xml_trace(client_msg, "hello"); if (curr_client) { curr_client->userdata = pcmk__str_copy(client_name); } controld_trigger_fsa(); return false; rejected: crm_log_xml_trace(client_msg, "rejected"); if (curr_client) { qb_ipcs_disconnect(curr_client->ipcs); } return false; } static enum crmd_fsa_input handle_message(xmlNode *msg, enum crmd_fsa_cause cause) { const char *type = NULL; CRM_CHECK(msg != NULL, return I_NULL); type = pcmk__xe_get(msg, PCMK__XA_SUBT); if (pcmk__str_eq(type, PCMK__VALUE_REQUEST, pcmk__str_none)) { return handle_request(msg, cause); } if (pcmk__str_eq(type, PCMK__VALUE_RESPONSE, pcmk__str_none)) { handle_response(msg); return I_NULL; } crm_warn("Ignoring message with unknown " PCMK__XA_SUBT" '%s'", pcmk__s(type, "")); crm_log_xml_trace(msg, "bad"); return I_NULL; } static enum crmd_fsa_input handle_failcount_op(xmlNode * stored_msg) { const char *rsc = NULL; const char *uname = NULL; const char *op = NULL; char *interval_spec = NULL; guint interval_ms = 0; gboolean is_remote_node = FALSE; xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, NULL, NULL); xmlNode *xml_op = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); if (xml_op) { xmlNode *xml_rsc = pcmk__xe_first_child(xml_op, PCMK_XE_PRIMITIVE, NULL, NULL); xmlNode *xml_attrs = pcmk__xe_first_child(xml_op, PCMK__XE_ATTRIBUTES, NULL, NULL); if (xml_rsc) { rsc = pcmk__xe_id(xml_rsc); } if (xml_attrs) { op = pcmk__xe_get(xml_attrs, CRM_META "_" PCMK__META_CLEAR_FAILURE_OP); pcmk__xe_get_guint(xml_attrs, CRM_META "_" PCMK__META_CLEAR_FAILURE_INTERVAL, &interval_ms); } } uname = pcmk__xe_get(xml_op, PCMK__META_ON_NODE); if ((rsc == NULL) || (uname == NULL)) { crm_log_xml_warn(stored_msg, "invalid failcount op"); return I_NULL; } if (pcmk__xe_get(xml_op, PCMK__XA_ROUTER_NODE)) { is_remote_node = TRUE; } crm_debug("Clearing failures for %s-interval %s on %s " "from attribute manager, CIB, and executor state", pcmk__readable_interval(interval_ms), rsc, uname); if (interval_ms) { interval_spec = pcmk__assert_asprintf("%ums", interval_ms); } update_attrd_clear_failures(uname, rsc, op, interval_spec, is_remote_node); free(interval_spec); controld_cib_delete_last_failure(rsc, uname, op, interval_ms); lrm_clear_last_failure(rsc, uname, op, interval_ms); return I_NULL; } static enum crmd_fsa_input handle_lrm_delete(xmlNode *stored_msg) { const char *mode = NULL; xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, NULL, NULL); xmlNode *msg_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); CRM_CHECK(msg_data != NULL, return I_NULL); /* CRM_OP_LRM_DELETE has two distinct modes. The default behavior is to * relay the operation to the affected node, which will unregister the * resource from the local executor, clear the resource's history from the * CIB, and do some bookkeeping in the controller. * * However, if the affected node is offline, the client will specify * mode=PCMK__VALUE_CIB which means the controller receiving the operation * should clear the resource's history from the CIB and nothing else. This * is used to clear shutdown locks. 
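 */

/* Illustration only, not part of this patch: a minimal standalone sketch of
 * the two-mode dispatch described above. The enum, the function name, and
 * the bare "cib" literal are invented for this sketch; the real handler
 * compares the XML attribute against PCMK__VALUE_CIB and routes the request
 * via FSA inputs rather than returning an enum.
 */
#include <string.h>

enum lrm_delete_mode { LRM_DELETE_RELAY, LRM_DELETE_CIB_ONLY };

static enum lrm_delete_mode
sketch_lrm_delete_mode(const char *mode_attr)
{
    /* Only an explicit mode of "cib" keeps the operation local (clear CIB
     * resource history only, e.g. for shutdown locks); anything else,
     * including a missing attribute, is relayed to the affected node.
     */
    if ((mode_attr != NULL) && (strcmp(mode_attr, "cib") == 0)) {
        return LRM_DELETE_CIB_ONLY;
    }
    return LRM_DELETE_RELAY;
}

/* End of illustrative sketch.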
*/ mode = pcmk__xe_get(msg_data, PCMK__XA_MODE); if (!pcmk__str_eq(mode, PCMK__VALUE_CIB, pcmk__str_none)) { // Relay to affected node pcmk__xe_set(stored_msg, PCMK__XA_CRM_SYS_TO, CRM_SYSTEM_LRMD); return I_ROUTER; } else { // Delete CIB history locally (compare with do_lrm_delete()) const char *from_sys = NULL; const char *user_name = NULL; const char *rsc_id = NULL; const char *node = NULL; xmlNode *rsc_xml = NULL; int rc = pcmk_rc_ok; rsc_xml = pcmk__xe_first_child(msg_data, PCMK_XE_PRIMITIVE, NULL, NULL); CRM_CHECK(rsc_xml != NULL, return I_NULL); rsc_id = pcmk__xe_id(rsc_xml); from_sys = pcmk__xe_get(stored_msg, PCMK__XA_CRM_SYS_FROM); node = pcmk__xe_get(msg_data, PCMK__META_ON_NODE); user_name = pcmk__update_acl_user(stored_msg, PCMK__XA_CRM_USER, NULL); crm_debug("Handling " CRM_OP_LRM_DELETE " for %s on %s locally%s%s " "(clearing CIB resource history only)", rsc_id, node, (user_name? " for user " : ""), (user_name? user_name : "")); rc = controld_delete_resource_history(rsc_id, node, user_name, cib_dryrun|cib_sync_call); if (rc == pcmk_rc_ok) { rc = controld_delete_resource_history(rsc_id, node, user_name, crmd_cib_smart_opt()); } /* Notify client. Also notify tengine if mode=PCMK__VALUE_CIB and * op=CRM_OP_LRM_DELETE. */ if (from_sys) { lrmd_event_data_t *op = NULL; const char *from_host = pcmk__xe_get(stored_msg, PCMK__XA_SRC); const char *transition; if (strcmp(from_sys, CRM_SYSTEM_TENGINE)) { transition = pcmk__xe_get(msg_data, PCMK__XA_TRANSITION_KEY); } else { transition = pcmk__xe_get(stored_msg, PCMK__XA_TRANSITION_KEY); } crm_info("Notifying %s on %s that %s was%s deleted", from_sys, (from_host? from_host : "local node"), rsc_id, ((rc == pcmk_rc_ok)? "" : " not")); op = lrmd_new_event(rsc_id, PCMK_ACTION_DELETE, 0); op->type = lrmd_event_exec_complete; op->user_data = pcmk__str_copy(pcmk__s(transition, FAKE_TE_ID)); op->params = pcmk__strkey_table(free, free); pcmk__insert_dup(op->params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET); controld_rc2event(op, rc); controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id); lrmd_free_event(op); controld_trigger_delete_refresh(from_sys, rsc_id); } return I_NULL; } } /*! * \brief Handle a CRM_OP_REMOTE_STATE message by updating remote peer cache * * \param[in] msg Message XML * * \return Next FSA input */ static enum crmd_fsa_input handle_remote_state(const xmlNode *msg) { const char *conn_host = NULL; const char *remote_uname = pcmk__xe_id(msg); pcmk__node_status_t *remote_peer; bool remote_is_up = false; int rc = pcmk_rc_ok; rc = pcmk__xe_get_bool(msg, PCMK__XA_IN_CCM, &remote_is_up); CRM_CHECK(remote_uname && rc == pcmk_rc_ok, return I_NULL); remote_peer = pcmk__cluster_lookup_remote_node(remote_uname); CRM_CHECK(remote_peer, return I_NULL); pcmk__update_peer_state(__func__, remote_peer, remote_is_up ? PCMK_VALUE_MEMBER : PCMK__VALUE_LOST, 0); conn_host = pcmk__xe_get(msg, PCMK__XA_CONNECTION_HOST); if (conn_host) { pcmk__str_update(&remote_peer->conn_host, conn_host); } else if (remote_peer->conn_host) { free(remote_peer->conn_host); remote_peer->conn_host = NULL; } return I_NULL; } /*! 
* \brief Handle a CRM_OP_PING message * * \param[in] msg Message XML * * \return Next FSA input */ static xmlNode* create_ping_reply(const xmlNode *msg) { const char *value = NULL; xmlNode *ping = NULL; xmlNode *reply = NULL; // Build reply ping = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE); value = pcmk__xe_get(msg, PCMK__XA_CRM_SYS_TO); pcmk__xe_set(ping, PCMK__XA_CRM_SUBSYSTEM, value); // Add controller state value = fsa_state2string(controld_globals.fsa_state); pcmk__xe_set(ping, PCMK__XA_CRMD_STATE, value); crm_notice("Current ping state: %s", value); // CTS needs this // Add controller health // @TODO maybe do some checks to determine meaningful status pcmk__xe_set(ping, PCMK_XA_RESULT, "ok"); reply = pcmk__new_reply(msg, ping); pcmk__xml_free(ping); return reply; } static enum crmd_fsa_input handle_ping(const xmlNode *msg) { xmlNode *reply = create_ping_reply(msg); if (reply != NULL) { (void) relay_message(reply, TRUE); pcmk__xml_free(reply); } // Nothing further to do return I_NULL; } /*! * \brief Handle a PCMK__CONTROLD_CMD_NODES message * * \param[in] request Message XML * * \return Next FSA input */ static enum crmd_fsa_input handle_node_list(const xmlNode *request) { GHashTableIter iter; pcmk__node_status_t *node = NULL; xmlNode *reply = NULL; xmlNode *reply_data = NULL; // Create message data for reply reply_data = pcmk__xe_create(NULL, PCMK_XE_NODES); g_hash_table_iter_init(&iter, pcmk__peer_cache); while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { xmlNode *xml = pcmk__xe_create(reply_data, PCMK_XE_NODE); pcmk__xe_set_ll(xml, PCMK_XA_ID, (long long) node->cluster_layer_id); // uint32_t pcmk__xe_set(xml, PCMK_XA_UNAME, node->name); pcmk__xe_set(xml, PCMK__XA_IN_CCM, node->state); } // Create and send reply reply = pcmk__new_reply(request, reply_data); pcmk__xml_free(reply_data); if (reply) { (void) relay_message(reply, TRUE); pcmk__xml_free(reply); } // Nothing further to do return I_NULL; } /*! * \brief Handle a CRM_OP_NODE_INFO request * * \param[in] msg Message XML * * \return Next FSA input */ static enum crmd_fsa_input handle_node_info_request(const xmlNode *msg) { const char *value = NULL; pcmk__node_status_t *node = NULL; int node_id = 0; xmlNode *reply = NULL; xmlNode *reply_data = NULL; // Build reply reply_data = pcmk__xe_create(NULL, PCMK_XE_NODE); pcmk__xe_set(reply_data, PCMK__XA_CRM_SUBSYSTEM, CRM_SYSTEM_CRMD); // Add whether current partition has quorum - pcmk__xe_set_bool_attr(reply_data, PCMK_XA_HAVE_QUORUM, - pcmk__is_set(controld_globals.flags, - controld_has_quorum)); + pcmk__xe_set_bool(reply_data, PCMK_XA_HAVE_QUORUM, + pcmk__is_set(controld_globals.flags, + controld_has_quorum)); /* Check whether client requested node info by ID and/or name * * @TODO A Corosync-layer node ID is of type uint32_t. We should be able to * handle legitimate node IDs greater than INT_MAX, but currently we do not. 
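 */

/* Illustration only, not part of this patch: the @TODO above is about signed
 * truncation. This is a plain ISO C sketch of a parser that would accept the
 * full uint32_t range; the function name is hypothetical and this is not the
 * XML accessor actually used below.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t
sketch_parse_node_id(const char *text)
{
    char *end = NULL;
    unsigned long long value;

    errno = 0;
    value = strtoull(text, &end, 10);
    if ((errno != 0) || (end == text) || (*end != '\0')
        || (value > UINT32_MAX)) {
        return 0;   /* treat as "no ID given", mirroring the handler below */
    }
    return (uint32_t) value;
}

/* End of illustrative sketch.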
*/ pcmk__xe_get_int(msg, PCMK_XA_ID, &node_id); if (node_id < 0) { node_id = 0; } value = pcmk__xe_get(msg, PCMK_XA_UNAME); // Default to local node if none given if ((node_id == 0) && (value == NULL)) { value = controld_globals.cluster->priv->node_name; } node = pcmk__search_node_caches(node_id, value, NULL, pcmk__node_search_any); if (node) { pcmk__xe_set(reply_data, PCMK_XA_ID, node->xml_id); pcmk__xe_set(reply_data, PCMK_XA_UNAME, node->name); pcmk__xe_set(reply_data, PCMK_XA_CRMD, node->state); - pcmk__xe_set_bool_attr(reply_data, PCMK_XA_REMOTE_NODE, - pcmk__is_set(node->flags, - pcmk__node_status_remote)); + pcmk__xe_set_bool(reply_data, PCMK_XA_REMOTE_NODE, + pcmk__is_set(node->flags, pcmk__node_status_remote)); } // Send reply reply = pcmk__new_reply(msg, reply_data); pcmk__xml_free(reply_data); if (reply != NULL) { (void) relay_message(reply, TRUE); pcmk__xml_free(reply); } // Nothing further to do return I_NULL; } static void verify_feature_set(xmlNode *msg) { const char *dc_version = pcmk__xe_get(msg, PCMK_XA_CRM_FEATURE_SET); if (dc_version == NULL) { /* All we really know is that the DC feature set is older than 3.1.0, * but that's also all that really matters. */ dc_version = "3.0.14"; } if (feature_set_compatible(dc_version, CRM_FEATURE_SET)) { crm_trace("Local feature set (%s) is compatible with DC's (%s)", CRM_FEATURE_SET, dc_version); } else { crm_err("Local feature set (%s) is incompatible with DC's (%s)", CRM_FEATURE_SET, dc_version); // Nothing is likely to improve without administrator involvement controld_set_fsa_input_flags(R_STAYDOWN); crmd_exit(CRM_EX_FATAL); } } // DC gets own shutdown all-clear static enum crmd_fsa_input handle_shutdown_self_ack(xmlNode *stored_msg) { const char *host_from = pcmk__xe_get(stored_msg, PCMK__XA_SRC); if (pcmk__is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { // The expected case -- we initiated own shutdown sequence crm_info("Shutting down controller"); return I_STOP; } if (pcmk__str_eq(host_from, controld_globals.dc_name, pcmk__str_casei)) { // Must be logic error -- DC confirming its own unrequested shutdown crm_err("Shutting down controller immediately due to " "unexpected shutdown confirmation"); return I_TERMINATE; } if (controld_globals.fsa_state != S_STOPPING) { // Shouldn't happen -- non-DC confirming unrequested shutdown crm_err("Starting new DC election because %s is " "confirming shutdown we did not request", (host_from? host_from : "another node")); return I_ELECTION; } // Shouldn't happen, but we are already stopping anyway crm_debug("Ignoring unexpected shutdown confirmation from %s", (host_from? 
host_from : "another node")); return I_NULL; } // Non-DC gets shutdown all-clear from DC static enum crmd_fsa_input handle_shutdown_ack(xmlNode *stored_msg) { const char *host_from = pcmk__xe_get(stored_msg, PCMK__XA_SRC); if (host_from == NULL) { crm_warn("Ignoring shutdown request without origin specified"); return I_NULL; } if (pcmk__str_eq(host_from, controld_globals.dc_name, pcmk__str_null_matches|pcmk__str_casei)) { if (pcmk__is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { crm_info("Shutting down controller after confirmation from %s", host_from); } else { crm_err("Shutting down controller after unexpected " "shutdown request from %s", host_from); controld_set_fsa_input_flags(R_STAYDOWN); } return I_STOP; } crm_warn("Ignoring shutdown request from %s because DC is %s", host_from, controld_globals.dc_name); return I_NULL; } static enum crmd_fsa_input handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause) { xmlNode *msg = NULL; const char *op = pcmk__xe_get(stored_msg, PCMK__XA_CRM_TASK); /* Optimize this for the DC - it has the most to do */ crm_log_xml_trace(stored_msg, "request"); if (op == NULL) { crm_warn("Ignoring request without " PCMK__XA_CRM_TASK); return I_NULL; } if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) { const char *from = pcmk__xe_get(stored_msg, PCMK__XA_SRC); pcmk__node_status_t *node = pcmk__search_node_caches(0, from, NULL, pcmk__node_search_cluster_member); pcmk__update_peer_expected(__func__, node, CRMD_JOINSTATE_DOWN); if(AM_I_DC == FALSE) { return I_NULL; /* Done */ } } /*========== DC-Only Actions ==========*/ if (AM_I_DC) { if (strcmp(op, CRM_OP_JOIN_ANNOUNCE) == 0) { return I_NODE_JOIN; } else if (strcmp(op, CRM_OP_JOIN_REQUEST) == 0) { return I_JOIN_REQUEST; } else if (strcmp(op, CRM_OP_JOIN_CONFIRM) == 0) { return I_JOIN_RESULT; } else if (strcmp(op, CRM_OP_SHUTDOWN) == 0) { return handle_shutdown_self_ack(stored_msg); } else if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) { // Another controller wants to shut down its node return handle_shutdown_request(stored_msg); } } /*========== common actions ==========*/ if (strcmp(op, CRM_OP_NOVOTE) == 0) { ha_msg_input_t fsa_input; fsa_input.msg = stored_msg; register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input, A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE, __func__); } else if (strcmp(op, CRM_OP_REMOTE_STATE) == 0) { /* a remote connection host is letting us know the node state */ return handle_remote_state(stored_msg); } else if (strcmp(op, CRM_OP_THROTTLE) == 0) { throttle_update(stored_msg); if (AM_I_DC && (controld_globals.transition_graph != NULL) && !controld_globals.transition_graph->complete) { crm_debug("The throttle changed. 
Trigger a graph."); trigger_graph(); } return I_NULL; } else if (strcmp(op, CRM_OP_CLEAR_FAILCOUNT) == 0) { return handle_failcount_op(stored_msg); } else if (strcmp(op, CRM_OP_VOTE) == 0) { /* count the vote and decide what to do after that */ ha_msg_input_t fsa_input; fsa_input.msg = stored_msg; register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input, A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE, __func__); /* Sometimes we _must_ go into S_ELECTION */ if (controld_globals.fsa_state == S_HALT) { crm_debug("Forcing an election from S_HALT"); return I_ELECTION; } } else if (strcmp(op, CRM_OP_JOIN_OFFER) == 0) { verify_feature_set(stored_msg); crm_debug("Raising I_JOIN_OFFER: join-%s", pcmk__xe_get(stored_msg, PCMK__XA_JOIN_ID)); return I_JOIN_OFFER; } else if (strcmp(op, CRM_OP_JOIN_ACKNAK) == 0) { crm_debug("Raising I_JOIN_RESULT: join-%s", pcmk__xe_get(stored_msg, PCMK__XA_JOIN_ID)); return I_JOIN_RESULT; } else if (strcmp(op, CRM_OP_LRM_DELETE) == 0) { return handle_lrm_delete(stored_msg); } else if ((strcmp(op, CRM_OP_LRM_FAIL) == 0) || (strcmp(op, CRM_OP_REPROBE) == 0)) { pcmk__xe_set(stored_msg, PCMK__XA_CRM_SYS_TO, CRM_SYSTEM_LRMD); return I_ROUTER; } else if (strcmp(op, CRM_OP_NOOP) == 0) { return I_NULL; } else if (strcmp(op, CRM_OP_PING) == 0) { return handle_ping(stored_msg); } else if (strcmp(op, CRM_OP_NODE_INFO) == 0) { return handle_node_info_request(stored_msg); } else if (strcmp(op, CRM_OP_RM_NODE_CACHE) == 0) { int id = 0; const char *name = NULL; pcmk__xe_get_int(stored_msg, PCMK_XA_ID, &id); name = pcmk__xe_get(stored_msg, PCMK_XA_UNAME); if(cause == C_IPC_MESSAGE) { msg = pcmk__new_request(pcmk_ipc_controld, CRM_SYSTEM_CRMD, NULL, CRM_SYSTEM_CRMD, CRM_OP_RM_NODE_CACHE, NULL); if (!pcmk__cluster_send_message(NULL, pcmk_ipc_controld, msg)) { crm_err("Could not instruct peers to remove references to node %s/%u", name, id); } else { crm_notice("Instructing peers to remove references to node %s/%u", name, id); } pcmk__xml_free(msg); } else { pcmk__cluster_forget_cluster_node(id, name); /* If we're forgetting this node, also forget any failures to fence * it, so we don't carry that over to any node added later with the * same name. */ st_fail_count_reset(name); } } else if (strcmp(op, CRM_OP_MAINTENANCE_NODES) == 0) { xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, NULL, NULL); xmlNode *xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); remote_ra_process_maintenance_nodes(xml); } else if (strcmp(op, PCMK__CONTROLD_CMD_NODES) == 0) { return handle_node_list(stored_msg); /*========== (NOT_DC)-Only Actions ==========*/ } else if (!AM_I_DC) { if (strcmp(op, CRM_OP_SHUTDOWN) == 0) { return handle_shutdown_ack(stored_msg); } } else { crm_err("Unexpected request (%s) sent to %s", op, AM_I_DC ? 
"the DC" : "non-DC node"); crm_log_xml_err(stored_msg, "Unexpected"); } return I_NULL; } static void handle_response(xmlNode *stored_msg) { const char *op = pcmk__xe_get(stored_msg, PCMK__XA_CRM_TASK); crm_log_xml_trace(stored_msg, "reply"); if (op == NULL) { crm_warn("Ignoring reply without " PCMK__XA_CRM_TASK); } else if (AM_I_DC && strcmp(op, CRM_OP_PECALC) == 0) { // Check whether scheduler answer been superseded by subsequent request const char *msg_ref = pcmk__xe_get(stored_msg, PCMK_XA_REFERENCE); if (msg_ref == NULL) { crm_err("%s - Ignoring calculation with no reference", op); } else if (pcmk__str_eq(msg_ref, controld_globals.fsa_pe_ref, pcmk__str_none)) { ha_msg_input_t fsa_input; controld_stop_sched_timer(); fsa_input.msg = stored_msg; register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input); } else { crm_info("%s calculation %s is obsolete", op, msg_ref); } } else if (strcmp(op, CRM_OP_VOTE) == 0 || strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0 || strcmp(op, CRM_OP_SHUTDOWN) == 0) { } else { const char *host_from = pcmk__xe_get(stored_msg, PCMK__XA_SRC); crm_err("Unexpected response (op=%s, src=%s) sent to the %s", op, host_from, AM_I_DC ? "DC" : "controller"); } } static enum crmd_fsa_input handle_shutdown_request(xmlNode * stored_msg) { /* handle here to avoid potential version issues * where the shutdown message/procedure may have * been changed in later versions. * * This way the DC is always in control of the shutdown */ char *now_s = NULL; const char *host_from = pcmk__xe_get(stored_msg, PCMK__XA_SRC); if (host_from == NULL) { /* we're shutting down and the DC */ host_from = controld_globals.cluster->priv->node_name; } crm_info("Creating shutdown request for %s (state=%s)", host_from, fsa_state2string(controld_globals.fsa_state)); crm_log_xml_trace(stored_msg, "message"); now_s = pcmk__ttoa(time(NULL)); update_attrd(host_from, PCMK__NODE_ATTR_SHUTDOWN, now_s, NULL, FALSE); free(now_s); /* will be picked up by the TE as long as its running */ return I_NULL; } static void send_msg_via_ipc(xmlNode * msg, const char *sys, const char *src) { pcmk__client_t *client_channel = NULL; CRM_CHECK(sys != NULL, return); client_channel = pcmk__find_client_by_id(sys); if (pcmk__xe_get(msg, PCMK__XA_SRC) == NULL) { pcmk__xe_set(msg, PCMK__XA_SRC, src); } if (client_channel != NULL) { /* Transient clients such as crmadmin */ pcmk__ipc_send_xml(client_channel, 0, msg, crm_ipc_server_event); } else if (pcmk__str_eq(sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) { xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL); xmlNode *data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); process_te_message(msg, data); } else if (pcmk__str_eq(sys, CRM_SYSTEM_LRMD, pcmk__str_none)) { fsa_data_t fsa_data; ha_msg_input_t fsa_input; xmlNode *wrapper = NULL; fsa_input.msg = msg; wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL); fsa_input.xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL); fsa_data.id = 0; fsa_data.actions = 0; fsa_data.data = &fsa_input; fsa_data.fsa_input = I_MESSAGE; fsa_data.fsa_cause = C_IPC_MESSAGE; fsa_data.origin = __func__; fsa_data.data_type = fsa_dt_ha_msg; do_lrm_invoke(A_LRM_INVOKE, C_IPC_MESSAGE, controld_globals.fsa_state, I_MESSAGE, &fsa_data); } else if (crmd_is_proxy_session(sys)) { crmd_proxy_send(sys, msg); } else { crm_info("Received invalid request: unknown subsystem '%s'", sys); } } void delete_ha_msg_input(ha_msg_input_t * orig) { if (orig == NULL) { return; } pcmk__xml_free(orig->msg); free(orig); } /*! 
* \internal * \brief Notify the cluster of a remote node state change * * \param[in] node_name Node's name * \param[in] node_up true if node is up, false if down */ void broadcast_remote_state_message(const char *node_name, bool node_up) { xmlNode *msg = pcmk__new_request(pcmk_ipc_controld, CRM_SYSTEM_CRMD, NULL, CRM_SYSTEM_CRMD, CRM_OP_REMOTE_STATE, NULL); crm_info("Notifying cluster of Pacemaker Remote node %s %s", node_name, node_up? "coming up" : "going down"); pcmk__xe_set(msg, PCMK_XA_ID, node_name); - pcmk__xe_set_bool_attr(msg, PCMK__XA_IN_CCM, node_up); + pcmk__xe_set_bool(msg, PCMK__XA_IN_CCM, node_up); if (node_up) { pcmk__xe_set(msg, PCMK__XA_CONNECTION_HOST, controld_globals.cluster->priv->node_name); } pcmk__cluster_send_message(NULL, pcmk_ipc_controld, msg); pcmk__xml_free(msg); } diff --git a/daemons/controld/controld_schedulerd.c b/daemons/controld/controld_schedulerd.c index c3a4be68b0..2c78c50081 100644 --- a/daemons/controld/controld_schedulerd.c +++ b/daemons/controld/controld_schedulerd.c @@ -1,537 +1,537 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include /* pid_t, sleep, ssize_t */ #include #include #include #include #include #include #include #include // xmlXPathObject, etc. #include static void handle_disconnect(void); static pcmk_ipc_api_t *schedulerd_api = NULL; static mainloop_timer_t *controld_cib_retry_timer = NULL; /*! * \internal * \brief Close any scheduler connection and free associated memory */ void controld_shutdown_schedulerd_ipc(void) { controld_clear_fsa_input_flags(R_PE_REQUIRED); pcmk_disconnect_ipc(schedulerd_api); handle_disconnect(); pcmk_free_ipc_api(schedulerd_api); schedulerd_api = NULL; } /*! * \internal * \brief Save CIB query result to file, raising FSA error * * \param[in] msg Ignored * \param[in] call_id Call ID of CIB query * \param[in] rc Return code of CIB query * \param[in] output Result of CIB query * \param[in] user_data Unique identifier for filename * * \note This is intended to be called after a scheduler connection fails. */ static void save_cib_contents(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data) { const char *id = user_data; register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__); CRM_CHECK(id != NULL, return); if (rc == pcmk_ok) { char *filename = pcmk__assert_asprintf(PCMK_SCHEDULER_INPUT_DIR "/pe-core-%s.bz2", id); if (pcmk__xml_write_file(output, filename, true) != pcmk_rc_ok) { crm_err("Could not save Cluster Information Base to %s after scheduler crash", filename); } else { crm_notice("Saved Cluster Information Base to %s after scheduler crash", filename); } free(filename); } } /*! * \internal * \brief Respond to scheduler connection failure */ static void handle_disconnect(void) { // If we aren't connected to the scheduler, we can't expect a reply controld_expect_sched_reply(NULL); if (pcmk__is_set(controld_globals.fsa_input_register, R_PE_REQUIRED)) { int rc = pcmk_ok; char *uuid_str = pcmk__generate_uuid(); crm_crit("Lost connection to the scheduler " QB_XS " CIB will be saved to " PCMK_SCHEDULER_INPUT_DIR "/pe-core-%s.bz2", uuid_str); /* * The scheduler died... * * Save the current CIB so that we have a chance of * figuring out what killed it. * * Delay raising the I_ERROR until the query below completes or * 5s is up, whichever comes first. 
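 */

/* Illustration only, not part of this patch: the crash dump saved by the
 * callback above lands at <scheduler input dir>/pe-core-<uuid>.bz2. A plain
 * ISO C sketch of that path construction follows; the function name and
 * parameters are hypothetical (the real code formats the path with the
 * PCMK_SCHEDULER_INPUT_DIR constant and a freshly generated UUID).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
sketch_pe_core_path(const char *input_dir, const char *uuid)
{
    size_t len = strlen(input_dir) + strlen("/pe-core-") + strlen(uuid)
                 + strlen(".bz2") + 1;
    char *path = malloc(len);

    if (path != NULL) {
        snprintf(path, len, "%s/pe-core-%s.bz2", input_dir, uuid);
    }
    return path;
}

/* End of illustrative sketch.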
* */ rc = controld_globals.cib_conn->cmds->query(controld_globals.cib_conn, NULL, NULL, cib_none); fsa_register_cib_callback(rc, uuid_str, save_cib_contents); } controld_clear_fsa_input_flags(R_PE_CONNECTED); controld_trigger_fsa(); return; } static void handle_reply(pcmk_schedulerd_api_reply_t *reply) { const char *msg_ref = NULL; if (!AM_I_DC) { return; } msg_ref = reply->data.graph.reference; if (msg_ref == NULL) { crm_err("%s - Ignoring calculation with no reference", CRM_OP_PECALC); } else if (pcmk__str_eq(msg_ref, controld_globals.fsa_pe_ref, pcmk__str_none)) { ha_msg_input_t fsa_input; xmlNode *crm_data_node; controld_stop_sched_timer(); /* do_te_invoke (which will eventually process the fsa_input we are constructing * here) requires that fsa_input.xml be non-NULL. That will only happen if * copy_ha_msg_input (which is called by register_fsa_input_adv) sees the * fsa_input.msg that it is expecting. The scheduler's IPC dispatch function * gave us the values we need, we just need to put them into XML. * * The name of the top level element here is irrelevant. Nothing checks it. */ fsa_input.msg = pcmk__xe_create(NULL, "dummy-reply"); pcmk__xe_set(fsa_input.msg, PCMK_XA_REFERENCE, msg_ref); pcmk__xe_set(fsa_input.msg, PCMK__XA_CRM_TGRAPH_IN, reply->data.graph.input); crm_data_node = pcmk__xe_create(fsa_input.msg, PCMK__XE_CRM_XML); pcmk__xml_copy(crm_data_node, reply->data.graph.tgraph); register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input); pcmk__xml_free(fsa_input.msg); } else { crm_info("%s calculation %s is obsolete", CRM_OP_PECALC, msg_ref); } } static void scheduler_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data, void *user_data) { pcmk_schedulerd_api_reply_t *reply = event_data; switch (event_type) { case pcmk_ipc_event_disconnect: handle_disconnect(); break; case pcmk_ipc_event_reply: handle_reply(reply); break; default: break; } } static bool new_schedulerd_ipc_connection(void) { int rc; controld_set_fsa_input_flags(R_PE_REQUIRED); if (schedulerd_api == NULL) { rc = pcmk_new_ipc_api(&schedulerd_api, pcmk_ipc_schedulerd); if (rc != pcmk_rc_ok) { crm_err("Error connecting to the scheduler: %s", pcmk_rc_str(rc)); return false; } } pcmk_register_ipc_callback(schedulerd_api, scheduler_event_callback, NULL); rc = pcmk__connect_ipc_retry_conrefused(schedulerd_api, pcmk_ipc_dispatch_main, 3); if (rc != pcmk_rc_ok) { crm_err("Error connecting to %s: %s", pcmk_ipc_name(schedulerd_api, true), pcmk_rc_str(rc)); return false; } controld_set_fsa_input_flags(R_PE_CONNECTED); return true; } static void do_pe_invoke_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *user_data); /* A_PE_START, A_PE_STOP, O_PE_RESTART */ void do_pe_control(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { if (pcmk__is_set(action, A_PE_STOP)) { controld_clear_fsa_input_flags(R_PE_REQUIRED); pcmk_disconnect_ipc(schedulerd_api); handle_disconnect(); } if (pcmk__is_set(action, A_PE_START) && !pcmk__is_set(controld_globals.fsa_input_register, R_PE_CONNECTED)) { if (cur_state == S_STOPPING) { crm_info("Ignoring request to connect to scheduler while shutting down"); } else if (!new_schedulerd_ipc_connection()) { crm_warn("Could not connect to scheduler"); register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL); } } } static int fsa_pe_query = 0; static mainloop_timer_t *controld_sched_timer = NULL; // @TODO Make this a configurable cluster option if there's 
demand for it #define SCHED_TIMEOUT_MS (120000) /*! * \internal * \brief Handle a timeout waiting for scheduler reply * * \param[in] user_data Ignored * * \return FALSE (indicating that timer should not be restarted) */ static gboolean controld_sched_timeout(gpointer user_data) { if (AM_I_DC) { /* If this node is the DC but can't communicate with the scheduler, just * exit (and likely get fenced) so this node doesn't interfere with any * further DC elections. * * @TODO We could try something less drastic first, like disconnecting * and reconnecting to the scheduler, but something is likely going * seriously wrong, so perhaps it's better to just fail as quickly as * possible. */ crmd_exit(CRM_EX_FATAL); } return FALSE; } void controld_stop_sched_timer(void) { if ((controld_sched_timer != NULL) && (controld_globals.fsa_pe_ref != NULL)) { crm_trace("Stopping timer for scheduler reply %s", controld_globals.fsa_pe_ref); } mainloop_timer_stop(controld_sched_timer); } /*! * \internal * \brief Set the scheduler request currently being waited on * * \param[in] ref Request to expect reply to (or NULL for none) * * \note This function takes ownership of \p ref. */ void controld_expect_sched_reply(char *ref) { if (ref) { if (controld_sched_timer == NULL) { controld_sched_timer = mainloop_timer_add("scheduler_reply_timer", SCHED_TIMEOUT_MS, FALSE, controld_sched_timeout, NULL); } mainloop_timer_start(controld_sched_timer); } else { controld_stop_sched_timer(); } free(controld_globals.fsa_pe_ref); controld_globals.fsa_pe_ref = ref; } /*! * \internal * \brief Free the scheduler reply timer */ void controld_free_sched_timer(void) { if (controld_sched_timer != NULL) { mainloop_timer_del(controld_sched_timer); controld_sched_timer = NULL; } } /* A_PE_INVOKE */ void do_pe_invoke(long long action, enum crmd_fsa_cause cause, enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data) { cib_t *cib_conn = controld_globals.cib_conn; if (AM_I_DC == FALSE) { crm_err("Not invoking scheduler because not DC: %s", fsa_action2string(action)); return; } if (!pcmk__is_set(controld_globals.fsa_input_register, R_PE_CONNECTED)) { if (pcmk__is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) { crm_err("Cannot shut down gracefully without the scheduler"); register_fsa_input_before(C_FSA_INTERNAL, I_TERMINATE, NULL); } else { crm_info("Waiting for the scheduler to connect"); crmd_fsa_stall(FALSE); controld_set_fsa_action_flags(A_PE_START); controld_trigger_fsa(); } return; } if (cur_state != S_POLICY_ENGINE) { crm_notice("Not invoking scheduler because in state %s", fsa_state2string(cur_state)); return; } if (!pcmk__is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) { crm_err("Attempted to invoke scheduler without consistent Cluster Information Base!"); /* start the join from scratch */ register_fsa_input_before(C_FSA_INTERNAL, I_ELECTION, NULL); return; } if (controld_cib_retry_timer != NULL) { crm_debug("Not invoking scheduler until CIB retry timer expires"); return; } fsa_pe_query = cib_conn->cmds->query(cib_conn, NULL, NULL, cib_none); crm_debug("Query %d: Requesting the current CIB: %s", fsa_pe_query, fsa_state2string(controld_globals.fsa_state)); controld_expect_sched_reply(NULL); fsa_register_cib_callback(fsa_pe_query, NULL, do_pe_invoke_callback); } static void force_local_option(xmlNode *xml, const char *attr_name, const char *attr_value) { int max = 0; int lpc = 0; const char *xpath_base = NULL; char *xpath_string = NULL; xmlXPathObject *xpathObj = NULL; xpath_base = 
pcmk_cib_xpath_for(PCMK_XE_CRM_CONFIG); if (xpath_base == NULL) { crm_err(PCMK_XE_CRM_CONFIG " CIB element not known (bug?)"); return; } xpath_string = pcmk__assert_asprintf("%s//%s//nvpair[@name='%s']", xpath_base, PCMK_XE_CLUSTER_PROPERTY_SET, attr_name); xpathObj = pcmk__xpath_search(xml->doc, xpath_string); max = pcmk__xpath_num_results(xpathObj); free(xpath_string); for (lpc = 0; lpc < max; lpc++) { xmlNode *match = pcmk__xpath_result(xpathObj, lpc); if (match == NULL) { continue; } crm_trace("Forcing %s/%s = %s", pcmk__xe_id(match), attr_name, attr_value); pcmk__xe_set(match, PCMK_XA_VALUE, attr_value); } if(max == 0) { xmlNode *configuration = NULL; xmlNode *crm_config = NULL; xmlNode *cluster_property_set = NULL; crm_trace("Creating %s-%s for %s=%s", PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, attr_name, attr_name, attr_value); configuration = pcmk__xe_first_child(xml, PCMK_XE_CONFIGURATION, NULL, NULL); if (configuration == NULL) { configuration = pcmk__xe_create(xml, PCMK_XE_CONFIGURATION); } crm_config = pcmk__xe_first_child(configuration, PCMK_XE_CRM_CONFIG, NULL, NULL); if (crm_config == NULL) { crm_config = pcmk__xe_create(configuration, PCMK_XE_CRM_CONFIG); } cluster_property_set = pcmk__xe_first_child(crm_config, PCMK_XE_CLUSTER_PROPERTY_SET, NULL, NULL); if (cluster_property_set == NULL) { cluster_property_set = pcmk__xe_create(crm_config, PCMK_XE_CLUSTER_PROPERTY_SET); pcmk__xe_set(cluster_property_set, PCMK_XA_ID, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS); } xml = pcmk__xe_create(cluster_property_set, PCMK_XE_NVPAIR); pcmk__xe_set_id(xml, "%s-%s", PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, attr_name); pcmk__xe_set(xml, PCMK_XA_NAME, attr_name); pcmk__xe_set(xml, PCMK_XA_VALUE, attr_value); } xmlXPathFreeObject(xpathObj); } static gboolean sleep_timer(gpointer data) { controld_set_fsa_action_flags(A_PE_INVOKE); controld_trigger_fsa(); mainloop_timer_del(controld_cib_retry_timer); controld_cib_retry_timer = NULL; return G_SOURCE_REMOVE; } static void do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { char *ref = NULL; pid_t watchdog = pcmk__locate_sbd(); if (rc != pcmk_ok) { crm_err("Could not retrieve the Cluster Information Base: %s " QB_XS " rc=%d call=%d", pcmk_strerror(rc), rc, call_id); register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__); return; } else if (call_id != fsa_pe_query) { crm_trace("Skipping superseded CIB query: %d (current=%d)", call_id, fsa_pe_query); return; } else if (!AM_I_DC || !pcmk__is_set(controld_globals.fsa_input_register, R_PE_CONNECTED)) { crm_debug("No need to invoke the scheduler anymore"); return; } else if (controld_globals.fsa_state != S_POLICY_ENGINE) { crm_debug("Discarding scheduler request in state: %s", fsa_state2string(controld_globals.fsa_state)); return; /* this callback counts as 1 */ } else if (num_cib_op_callbacks() > 1) { crm_debug("Re-asking for the CIB: %d other peer updates still pending", (num_cib_op_callbacks() - 1)); controld_cib_retry_timer = mainloop_timer_add("cib_retry", 1000, false, sleep_timer, NULL); mainloop_timer_start(controld_cib_retry_timer); return; } CRM_LOG_ASSERT(output != NULL); /* Refresh the remote node cache and the known node cache when the * scheduler is invoked */ pcmk__refresh_node_caches_from_cib(output); pcmk__xe_set(output, PCMK_XA_DC_UUID, controld_globals.our_uuid); - pcmk__xe_set_bool_attr(output, PCMK_XA_HAVE_QUORUM, - pcmk__is_set(controld_globals.flags, - controld_has_quorum)); + pcmk__xe_set_bool(output, PCMK_XA_HAVE_QUORUM, + 
pcmk__is_set(controld_globals.flags, + controld_has_quorum)); force_local_option(output, PCMK_OPT_HAVE_WATCHDOG, pcmk__btoa(watchdog)); if (pcmk__is_set(controld_globals.flags, controld_ever_had_quorum) && !pcmk__cluster_has_quorum()) { pcmk__xe_set_int(output, PCMK_XA_NO_QUORUM_PANIC, 1); } rc = pcmk_schedulerd_api_graph(schedulerd_api, output, &ref); if (rc != pcmk_rc_ok) { free(ref); crm_err("Could not contact the scheduler: %s " QB_XS " rc=%d", pcmk_rc_str(rc), rc); register_fsa_error_adv(C_FSA_INTERNAL, I_ERROR, NULL, NULL, __func__); } else { pcmk__assert(ref != NULL); controld_expect_sched_reply(ref); crm_debug("Invoking the scheduler: query=%d, ref=%s, seq=%llu, " "quorate=%s", fsa_pe_query, controld_globals.fsa_pe_ref, controld_globals.peer_seq, pcmk__flag_text(controld_globals.flags, controld_has_quorum)); } } diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c index edbd2a566b..03a00d1bef 100644 --- a/daemons/fenced/fenced_commands.c +++ b/daemons/fenced/fenced_commands.c @@ -1,3663 +1,3663 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include // bool #include #include #include #include #include #include #include #include #include #include #include // xmlNode #include // xmlXPathObject, etc. #include #include #include #include #include #include #include #include #include static GHashTable *device_table = NULL; GHashTable *topology = NULL; static GList *cmd_list = NULL; static GHashTable *fenced_handlers = NULL; struct device_search_s { /* target of fence action */ char *host; /* requested fence action */ char *action; /* timeout to use if a device is queried dynamically for possible targets */ // @TODO This name is misleading now, it's the value of stonith-timeout int per_device_timeout; /* number of registered fencing devices at time of request */ int replies_needed; /* number of device replies received so far */ int replies_received; /* whether the target is eligible to perform requested action (or off) */ bool allow_self; /* private data to pass to search callback function */ void *user_data; /* function to call when all replies have been received */ void (*callback) (GList * devices, void *user_data); /* devices capable of performing requested action (or off if remapping) */ GList *capable; /* Whether to perform searches that support the action */ uint32_t support_action_only; }; static gboolean stonith_device_dispatch(gpointer user_data); static void st_child_done(int pid, const pcmk__action_result_t *result, void *user_data); static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence); static int get_agent_metadata(const char *agent, xmlNode **metadata); static void read_action_metadata(fenced_device_t *device); static enum fenced_target_by unpack_level_kind(const xmlNode *level); typedef struct { int id; uint32_t options; int default_timeout; /* seconds */ int timeout; /* seconds */ int start_delay; // seconds (-1 means disable static/random fencing delays) int delay_id; char *op; char *origin; char *client; char *client_name; char *remote_op_id; char *target; char *action; char *device; //! Head of device list (used only for freeing list with command object) GList *device_list; //! 
Next item to process in \c device_list GList *next_device_iter; void *internal_user_data; void (*done_cb) (int pid, const pcmk__action_result_t *result, void *user_data); fenced_device_t *active_on; fenced_device_t *activating_on; } async_command_t; static xmlNode *construct_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result); /*! * \internal * \brief Set a bad fencer API request error in a result object * * \param[out] result Result to set */ static inline void set_bad_request_result(pcmk__action_result_t *result) { pcmk__set_result(result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Fencer API request missing required information (bug?)"); } /*! * \internal * \brief Check whether the fencer's device table contains a watchdog device * * \retval \c true If the device table contains a watchdog device * \retval \c false Otherwise */ bool fenced_has_watchdog_device(void) { return (device_table != NULL) && (g_hash_table_lookup(device_table, STONITH_WATCHDOG_ID) != NULL); } /*! * \internal * \brief Call a function for each known fence device * * \param[in] fn Function to call for each device * \param[in,out] user_data User data */ void fenced_foreach_device(GHFunc fn, gpointer user_data) { if (device_table != NULL) { g_hash_table_foreach(device_table, fn, user_data); } } /*! * \internal * \brief Remove each known fence device matching a given predicate * * \param[in] fn Function that returns \c TRUE to remove a fence device or * \c FALSE to keep it */ void fenced_foreach_device_remove(GHRFunc fn) { if (device_table != NULL) { g_hash_table_foreach_remove(device_table, fn, NULL); } } static gboolean is_action_required(const char *action, const fenced_device_t *device) { return (device != NULL) && pcmk__is_set(device->flags, fenced_df_auto_unfence) && pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none); } static int get_action_delay_max(const fenced_device_t *device, const char *action) { const char *value = NULL; guint delay_max = 0U; if (!pcmk__is_fencing_action(action)) { return 0; } value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_MAX); if (value) { pcmk_parse_interval_spec(value, &delay_max); delay_max /= 1000; } return (int) delay_max; } static int get_action_delay_base(const fenced_device_t *device, const char *action, const char *target) { char *hash_value = NULL; guint delay_base = 0U; if (!pcmk__is_fencing_action(action)) { return 0; } hash_value = g_hash_table_lookup(device->params, PCMK_STONITH_DELAY_BASE); if (hash_value) { char *value = pcmk__str_copy(hash_value); char *valptr = value; if (target != NULL) { for (char *val = strtok(value, "; \t"); val != NULL; val = strtok(NULL, "; \t")) { char *mapval = strchr(val, ':'); if (mapval == NULL || mapval[1] == 0) { crm_err("pcmk_delay_base: empty value in mapping", val); continue; } if (mapval != val && strncasecmp(target, val, (size_t)(mapval - val)) == 0) { value = mapval + 1; crm_debug("pcmk_delay_base mapped to %s for %s", value, target); break; } } } if (strchr(value, ':') == 0) { pcmk_parse_interval_spec(value, &delay_base); delay_base /= 1000; } free(valptr); } return (int) delay_base; } /*! 
* \internal * \brief Override STONITH timeout with pcmk_*_timeout if available * * \param[in] device STONITH device to use * \param[in] action STONITH action name * \param[in] default_timeout Timeout to use if device does not have * a pcmk_*_timeout parameter for action * * \return Value of pcmk_(action)_timeout if available, otherwise default_timeout * \note For consistency, it would be nice if reboot/off/on timeouts could be * set the same way as start/stop/monitor timeouts, i.e. with an * entry in the fencing resource configuration. However that * is insufficient because fencing devices may be registered directly via * the fencer's register_device() API instead of going through the CIB * (e.g. stonith_admin uses it for its -R option, and the executor uses it * to ensure a device is registered when a command is issued). As device * properties, pcmk_*_timeout parameters can be grabbed by the fencer when * the device is registered, whether by CIB change or API call. */ static int get_action_timeout(const fenced_device_t *device, const char *action, int default_timeout) { if (action && device && device->params) { char *timeout_param = NULL; const char *value = NULL; /* If "reboot" was requested but the device does not support it, * we will remap to "off", so check timeout for "off" instead */ if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk__is_set(device->flags, fenced_df_supports_reboot)) { crm_trace("%s doesn't support reboot, using timeout for off instead", device->id); action = PCMK_ACTION_OFF; } /* If the device config specified an action-specific timeout, use it */ timeout_param = pcmk__assert_asprintf("pcmk_%s_timeout", action); value = g_hash_table_lookup(device->params, timeout_param); free(timeout_param); if (value) { long long timeout_ms = 0; if ((pcmk__parse_ms(value, &timeout_ms) == pcmk_rc_ok) && (timeout_ms >= 0)) { int timeout_sec = 0; timeout_ms = QB_MIN(timeout_ms, UINT_MAX); timeout_sec = pcmk__timeout_ms2s((guint) timeout_ms); return QB_MIN(timeout_sec, INT_MAX); } } } return default_timeout; } /*! * \internal * \brief Get the currently executing device for a fencing operation * * \param[in] cmd Fencing operation to check * * \return Currently executing device for \p cmd if any, otherwise NULL */ static fenced_device_t * cmd_device(const async_command_t *cmd) { if ((cmd == NULL) || (cmd->device == NULL) || (device_table == NULL)) { return NULL; } return g_hash_table_lookup(device_table, cmd->device); } /*! * \internal * \brief Return the configured reboot action for a given device * * \param[in] device_id Device ID * * \return Configured reboot action for \p device_id */ const char * fenced_device_reboot_action(const char *device_id) { const char *action = NULL; if ((device_table != NULL) && (device_id != NULL)) { fenced_device_t *device = g_hash_table_lookup(device_table, device_id); if ((device != NULL) && (device->params != NULL)) { action = g_hash_table_lookup(device->params, "pcmk_reboot_action"); } } return pcmk__s(action, PCMK_ACTION_REBOOT); } /*! 
* \internal * \brief Check whether a given device supports the "on" action * * \param[in] device_id Device ID * * \return true if \p device_id supports "on", otherwise false */ bool fenced_device_supports_on(const char *device_id) { if ((device_table != NULL) && (device_id != NULL)) { fenced_device_t *device = g_hash_table_lookup(device_table, device_id); if (device != NULL) { return pcmk__is_set(device->flags, fenced_df_supports_on); } } return false; } static void free_async_command(async_command_t * cmd) { if (!cmd) { return; } if (cmd->delay_id) { g_source_remove(cmd->delay_id); } cmd_list = g_list_remove(cmd_list, cmd); g_list_free_full(cmd->device_list, free); free(cmd->device); free(cmd->action); free(cmd->target); free(cmd->remote_op_id); free(cmd->client); free(cmd->client_name); free(cmd->origin); free(cmd->op); free(cmd); } /*! * \internal * \brief Create a new asynchronous fencing operation from request XML * * \param[in] msg Fencing request XML (from IPC or CPG) * * \return Newly allocated fencing operation on success, otherwise NULL * * \note This asserts on memory errors, so a NULL return indicates an * unparseable message. */ static async_command_t * create_async_command(xmlNode *msg) { xmlNode *op = NULL; async_command_t *cmd = NULL; int rc = pcmk_rc_ok; if (msg == NULL) { return NULL; } op = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_ERR); if (op == NULL) { return NULL; } cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); // All messages must include these cmd->action = pcmk__xe_get_copy(op, PCMK__XA_ST_DEVICE_ACTION); cmd->op = pcmk__xe_get_copy(msg, PCMK__XA_ST_OP); cmd->client = pcmk__xe_get_copy(msg, PCMK__XA_ST_CLIENTID); if ((cmd->action == NULL) || (cmd->op == NULL) || (cmd->client == NULL)) { free_async_command(cmd); return NULL; } pcmk__xe_get_int(msg, PCMK__XA_ST_CALLID, &(cmd->id)); pcmk__xe_get_int(msg, PCMK__XA_ST_DELAY, &(cmd->start_delay)); pcmk__xe_get_int(msg, PCMK__XA_ST_TIMEOUT, &(cmd->default_timeout)); cmd->timeout = cmd->default_timeout; rc = pcmk__xe_get_flags(msg, PCMK__XA_ST_CALLOPT, &(cmd->options), st_opt_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request: %s", pcmk_rc_str(rc)); } cmd->origin = pcmk__xe_get_copy(msg, PCMK__XA_SRC); cmd->remote_op_id = pcmk__xe_get_copy(msg, PCMK__XA_ST_REMOTE_OP); cmd->client_name = pcmk__xe_get_copy(msg, PCMK__XA_ST_CLIENTNAME); cmd->target = pcmk__xe_get_copy(op, PCMK__XA_ST_TARGET); cmd->device = pcmk__xe_get_copy(op, PCMK__XA_ST_DEVICE_ID); cmd->done_cb = st_child_done; // Track in global command list cmd_list = g_list_append(cmd_list, cmd); return cmd; } static int get_action_limit(fenced_device_t *device) { const char *value = NULL; int action_limit = 1; value = g_hash_table_lookup(device->params, PCMK_STONITH_ACTION_LIMIT); if ((value == NULL) || (pcmk__scan_min_int(value, &action_limit, INT_MIN) != pcmk_rc_ok) || (action_limit == 0)) { action_limit = 1; } return action_limit; } static int get_active_cmds(fenced_device_t *device) { int counter = 0; GList *gIter = NULL; GList *gIterNext = NULL; CRM_CHECK(device != NULL, return 0); for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) { async_command_t *cmd = gIter->data; gIterNext = gIter->next; if (cmd->active_on == device) { counter++; } } return counter; } static void fork_cb(int pid, void *user_data) { async_command_t *cmd = (async_command_t *) user_data; fenced_device_t *device = cmd->activating_on; if (device == NULL) { /* In case of a retry, we've done the move from activating_on to * 
active_on already */ device = cmd->active_on; } pcmk__assert(device != NULL); crm_debug("Operation '%s' [%d]%s%s using %s now running with %ds timeout", cmd->action, pid, ((cmd->target == NULL)? "" : " targeting "), pcmk__s(cmd->target, ""), device->id, cmd->timeout); cmd->active_on = device; cmd->activating_on = NULL; } static int get_agent_metadata_cb(gpointer data) { fenced_device_t *device = data; guint period_ms; switch (get_agent_metadata(device->agent, &device->agent_metadata)) { case pcmk_rc_ok: if (device->agent_metadata) { read_action_metadata(device); device->default_host_arg = stonith__default_host_arg(device->agent_metadata); } return G_SOURCE_REMOVE; case EAGAIN: period_ms = pcmk__mainloop_timer_get_period(device->timer); if (period_ms < 160 * 1000) { mainloop_timer_set_period(device->timer, 2 * period_ms); } return G_SOURCE_CONTINUE; default: return G_SOURCE_REMOVE; } } /*! * \internal * \brief Call a command's action callback for an internal (not library) result * * \param[in,out] cmd Command to report result for * \param[in] execution_status Execution status to use for result * \param[in] exit_status Exit status to use for result * \param[in] exit_reason Exit reason to use for result */ static void report_internal_result(async_command_t *cmd, int exit_status, int execution_status, const char *exit_reason) { pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; pcmk__set_result(&result, exit_status, execution_status, exit_reason); cmd->done_cb(0, &result, cmd); pcmk__reset_result(&result); } static gboolean stonith_device_execute(fenced_device_t *device) { int exec_rc = 0; const char *action_str = NULL; async_command_t *cmd = NULL; stonith_action_t *action = NULL; int active_cmds = 0; int action_limit = 0; GList *gIter = NULL; GList *gIterNext = NULL; CRM_CHECK(device != NULL, return FALSE); active_cmds = get_active_cmds(device); action_limit = get_action_limit(device); if (action_limit > -1 && active_cmds >= action_limit) { crm_trace("%s is over its action limit of %d (%u active action%s)", device->id, action_limit, active_cmds, pcmk__plural_s(active_cmds)); return TRUE; } for (gIter = device->pending_ops; gIter != NULL; gIter = gIterNext) { async_command_t *pending_op = gIter->data; gIterNext = gIter->next; if (pending_op && pending_op->delay_id) { crm_trace("Operation '%s'%s%s using %s was asked to run too early, " "waiting for start delay of %ds", pending_op->action, ((pending_op->target == NULL)? 
"" : " targeting "), pcmk__s(pending_op->target, ""), device->id, pending_op->start_delay); continue; } device->pending_ops = g_list_remove_link(device->pending_ops, gIter); g_list_free_1(gIter); cmd = pending_op; break; } if (cmd == NULL) { crm_trace("No actions using %s are needed", device->id); return TRUE; } if (pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { if (pcmk__is_fencing_action(cmd->action)) { if (node_does_watchdog_fencing(fenced_get_local_node())) { pcmk__panic("Watchdog self-fencing required"); goto done; } } else { crm_info("Faking success for %s watchdog operation", cmd->action); report_internal_result(cmd, CRM_EX_OK, PCMK_EXEC_DONE, NULL); goto done; } } #if PCMK__ENABLE_CIBSECRETS exec_rc = pcmk__substitute_secrets(device->id, device->params); if (exec_rc != pcmk_rc_ok) { if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) { crm_info("Proceeding with stop operation for %s " "despite being unable to load CIB secrets (%s)", device->id, pcmk_rc_str(exec_rc)); } else { crm_err("Considering %s unconfigured " "because unable to load CIB secrets: %s", device->id, pcmk_rc_str(exec_rc)); report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_SECRETS, "Failed to get CIB secrets"); goto done; } } #endif action_str = cmd->action; if (pcmk__str_eq(cmd->action, PCMK_ACTION_REBOOT, pcmk__str_none) && !pcmk__is_set(device->flags, fenced_df_supports_reboot)) { crm_notice("Remapping 'reboot' action%s%s using %s to 'off' " "because agent '%s' does not support reboot", ((cmd->target == NULL)? "" : " targeting "), pcmk__s(cmd->target, ""), device->id, device->agent); action_str = PCMK_ACTION_OFF; } action = stonith__action_create(device->agent, action_str, cmd->target, cmd->timeout, device->params, device->aliases, device->default_host_arg); /* for async exec, exec_rc is negative for early error exit otherwise handling of success/errors is done via callbacks */ cmd->activating_on = device; exec_rc = stonith__execute_async(action, (void *)cmd, cmd->done_cb, fork_cb); if (exec_rc < 0) { cmd->activating_on = NULL; cmd->done_cb(0, stonith__action_result(action), cmd); stonith__destroy_action(action); } done: /* Device might get triggered to work by multiple fencing commands * simultaneously. Trigger the device again to make sure any * remaining concurrent commands get executed. */ if (device->pending_ops) { mainloop_set_trigger(device->work); } return TRUE; } static gboolean stonith_device_dispatch(gpointer user_data) { return stonith_device_execute(user_data); } static gboolean start_delay_helper(gpointer data) { async_command_t *cmd = data; fenced_device_t *device = cmd_device(cmd); cmd->delay_id = 0; if (device) { mainloop_set_trigger(device->work); } return FALSE; } static void schedule_stonith_command(async_command_t *cmd, fenced_device_t *device) { int delay_max = 0; int delay_base = 0; int requested_delay = cmd->start_delay; CRM_CHECK(cmd != NULL, return); CRM_CHECK(device != NULL, return); if (cmd->device) { free(cmd->device); } cmd->device = pcmk__str_copy(device->id); cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout); if (cmd->remote_op_id) { crm_debug("Scheduling '%s' action%s%s using %s for remote peer %s " "with op id %.8s and timeout %ds", cmd->action, (cmd->target == NULL)? 
"" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->origin, cmd->remote_op_id, cmd->timeout); } else { crm_debug("Scheduling '%s' action%s%s using %s for %s with timeout %ds", cmd->action, (cmd->target == NULL)? "" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->client, cmd->timeout); } device->pending_ops = g_list_append(device->pending_ops, cmd); mainloop_set_trigger(device->work); // Value -1 means disable any static/random fencing delays if (requested_delay < 0) { return; } delay_max = get_action_delay_max(device, cmd->action); delay_base = get_action_delay_base(device, cmd->action, cmd->target); if (delay_max == 0) { delay_max = delay_base; } if (delay_max < delay_base) { crm_warn(PCMK_STONITH_DELAY_BASE " (%ds) is larger than " PCMK_STONITH_DELAY_MAX " (%ds) for %s using %s " "(limiting to maximum delay)", delay_base, delay_max, cmd->action, device->id); delay_base = delay_max; } if (delay_max > 0) { cmd->start_delay += delay_base; // Add random offset so that delay_base <= cmd->start_delay <= delay_max if (delay_max > delay_base) { // coverity[dont_call] Doesn't matter that rand() is predictable cmd->start_delay += rand() % (delay_max - delay_base + 1); } } if (cmd->start_delay > 0) { crm_notice("Delaying '%s' action%s%s using %s for %ds " QB_XS " timeout=%ds requested_delay=%ds base=%ds max=%ds", cmd->action, (cmd->target == NULL)? "" : " targeting ", pcmk__s(cmd->target, ""), device->id, cmd->start_delay, cmd->timeout, requested_delay, delay_base, delay_max); cmd->delay_id = pcmk__create_timer(cmd->start_delay * 1000, start_delay_helper, cmd); } } static void free_device(gpointer data) { GList *gIter = NULL; fenced_device_t *device = data; g_hash_table_destroy(device->params); g_hash_table_destroy(device->aliases); for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) { async_command_t *cmd = gIter->data; crm_warn("Removal of device '%s' purged operation '%s'", device->id, cmd->action); report_internal_result(cmd, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Device was removed before action could be executed"); } g_list_free(device->pending_ops); g_list_free_full(device->targets, free); if (device->timer) { mainloop_timer_stop(device->timer); mainloop_timer_del(device->timer); } mainloop_destroy_trigger(device->work); pcmk__xml_free(device->agent_metadata); free(device->namespace); if (device->on_target_actions != NULL) { g_string_free(device->on_target_actions, TRUE); } free(device->agent); free(device->id); free(device); } /*! * \internal * \brief Initialize the table of known fence devices */ void fenced_init_device_table(void) { if (device_table == NULL) { device_table = pcmk__strkey_table(NULL, free_device); } } /*! 
 * \internal * \brief Free the table of known fence devices */ void fenced_free_device_table(void) { if (device_table != NULL) { g_hash_table_destroy(device_table); device_table = NULL; } } static GHashTable * build_port_aliases(const char *hostmap, GList ** targets) { char *name = NULL; int last = 0, lpc = 0, max = 0, added = 0; GHashTable *aliases = pcmk__strikey_table(free, free); if (hostmap == NULL) { return aliases; } max = strlen(hostmap); for (; lpc <= max; lpc++) { switch (hostmap[lpc]) { /* Skip escaped chars */ case '\\': lpc++; break; /* Assignment chars */ case '=': case ':': if (lpc > last) { free(name); name = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(name, hostmap + last, lpc - last); } last = lpc + 1; break; /* Delimiter chars */ /* case ',': Potentially used to specify multiple ports */ case 0: case ';': case ' ': case '\t': if (name) { char *value = NULL; int k = 0; value = pcmk__assert_alloc(1, 1 + lpc - last); memcpy(value, hostmap + last, lpc - last); for (int i = 0; value[i] != '\0'; i++) { if (value[i] != '\\') { value[k++] = value[i]; } } value[k] = '\0'; crm_debug("Adding alias '%s'='%s'", name, value); g_hash_table_replace(aliases, name, value); if (targets) { *targets = g_list_append(*targets, pcmk__str_copy(value)); } value = NULL; name = NULL; added++; } else if (lpc > last) { crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last); } last = lpc + 1; break; } if (hostmap[lpc] == 0) { break; } } if (added == 0) { crm_info("No host mappings detected in '%s'", hostmap); } free(name); return aliases; } GHashTable *metadata_cache = NULL; void free_metadata_cache(void) { if (metadata_cache != NULL) { g_hash_table_destroy(metadata_cache); metadata_cache = NULL; } } static void init_metadata_cache(void) { if (metadata_cache == NULL) { metadata_cache = pcmk__strkey_table(free, free); } } int get_agent_metadata(const char *agent, xmlNode ** metadata) { char *buffer = NULL; if (metadata == NULL) { return EINVAL; } *metadata = NULL; if (pcmk__str_eq(agent, STONITH_WATCHDOG_AGENT_INTERNAL, pcmk__str_none)) { return pcmk_rc_ok; } init_metadata_cache(); buffer = g_hash_table_lookup(metadata_cache, agent); if (buffer == NULL) { stonith_t *st = stonith__api_new(); int rc; if (st == NULL) { crm_warn("Could not get agent meta-data: " "API memory allocation failed"); return EAGAIN; } rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10); stonith__api_free(st); if (rc || !buffer) { crm_err("Could not retrieve metadata for fencing agent %s", agent); return EAGAIN; } g_hash_table_replace(metadata_cache, pcmk__str_copy(agent), buffer); } *metadata = pcmk__xml_parse(buffer); return pcmk_rc_ok; } static void read_action_metadata(fenced_device_t *device) { xmlXPathObject *xpath = NULL; int max = 0; int lpc = 0; if (device->agent_metadata == NULL) { return; } xpath = pcmk__xpath_search(device->agent_metadata->doc, "//" PCMK_XE_ACTION); max = pcmk__xpath_num_results(xpath); if (max == 0) { xmlXPathFreeObject(xpath); return; } for (lpc = 0; lpc < max; lpc++) { const char *action = NULL; xmlNode *match = pcmk__xpath_result(xpath, lpc); CRM_LOG_ASSERT(match != NULL); if (match == NULL) { continue; } action = pcmk__xe_get(match, PCMK_XA_NAME); if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { fenced_device_set_flags(device, fenced_df_supports_list); } else if (pcmk__str_eq(action, PCMK_ACTION_STATUS, pcmk__str_none)) { fenced_device_set_flags(device, fenced_df_supports_status); } else if (pcmk__str_eq(action, PCMK_ACTION_REBOOT,
pcmk__str_none)) { fenced_device_set_flags(device, fenced_df_supports_reboot); } else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { /* PCMK_XA_AUTOMATIC means the cluster will unfence a node when it * joins. * * @COMPAT PCMK__XA_REQUIRED is a deprecated synonym for * PCMK_XA_AUTOMATIC. */ if (pcmk__xe_attr_is_true(match, PCMK_XA_AUTOMATIC) || pcmk__xe_attr_is_true(match, PCMK__XA_REQUIRED)) { fenced_device_set_flags(device, fenced_df_auto_unfence); } fenced_device_set_flags(device, fenced_df_supports_on); } if ((action != NULL) && pcmk__xe_attr_is_true(match, PCMK_XA_ON_TARGET)) { pcmk__add_word(&(device->on_target_actions), 64, action); } } xmlXPathFreeObject(xpath); } static const char * target_list_type(fenced_device_t *dev) { const char *check_type = NULL; check_type = g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK); if (check_type == NULL) { if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_LIST)) { check_type = PCMK_VALUE_STATIC_LIST; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP)) { check_type = PCMK_VALUE_STATIC_LIST; } else if (pcmk__is_set(dev->flags, fenced_df_supports_list)) { check_type = PCMK_VALUE_DYNAMIC_LIST; } else if (pcmk__is_set(dev->flags, fenced_df_supports_status)) { check_type = PCMK_VALUE_STATUS; } else { check_type = PCMK_VALUE_NONE; } } return check_type; } static fenced_device_t * build_device_from_xml(const xmlNode *dev) { const char *value; fenced_device_t *device = NULL; char *agent = pcmk__xe_get_copy(dev, PCMK_XA_AGENT); CRM_CHECK(agent != NULL, return device); device = pcmk__assert_alloc(1, sizeof(fenced_device_t)); device->id = pcmk__xe_get_copy(dev, PCMK_XA_ID); device->agent = agent; device->namespace = pcmk__xe_get_copy(dev, PCMK__XA_NAMESPACE); device->params = xml2list(dev); value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_LIST); if (value) { device->targets = stonith__parse_targets(value); } value = g_hash_table_lookup(device->params, PCMK_STONITH_HOST_MAP); device->aliases = build_port_aliases(value, &(device->targets)); value = target_list_type(device); if (!pcmk__str_eq(value, PCMK_VALUE_STATIC_LIST, pcmk__str_casei) && (device->targets != NULL)) { // device->targets is necessary only with PCMK_VALUE_STATIC_LIST g_list_free_full(device->targets, free); device->targets = NULL; } switch (get_agent_metadata(device->agent, &device->agent_metadata)) { case pcmk_rc_ok: if (device->agent_metadata) { read_action_metadata(device); device->default_host_arg = stonith__default_host_arg(device->agent_metadata); } break; case EAGAIN: if (device->timer == NULL) { device->timer = mainloop_timer_add("get_agent_metadata", 10 * 1000, TRUE, get_agent_metadata_cb, device); } if (!mainloop_timer_running(device->timer)) { mainloop_timer_start(device->timer); } break; default: break; } value = pcmk__xe_get(dev, PCMK__XA_RSC_PROVIDES); if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) { fenced_device_set_flags(device, fenced_df_auto_unfence); } if (is_action_required(PCMK_ACTION_ON, device)) { crm_info("Fencing device '%s' requires unfencing", device->id); } if (device->on_target_actions != NULL) { crm_info("Fencing device '%s' requires actions (%s) to be executed " "on target", device->id, (const char *) device->on_target_actions->str); } device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device); return device; } static void schedule_internal_command(const char *origin, fenced_device_t *device, const char *action, const char *target, int timeout, void 
*internal_user_data, void (*done_cb) (int pid, const pcmk__action_result_t *result, void *user_data)) { async_command_t *cmd = NULL; cmd = pcmk__assert_alloc(1, sizeof(async_command_t)); cmd->id = -1; cmd->default_timeout = timeout ? timeout : 60; cmd->timeout = cmd->default_timeout; cmd->action = pcmk__str_copy(action); cmd->target = pcmk__str_copy(target); cmd->device = pcmk__str_copy(device->id); cmd->origin = pcmk__str_copy(origin); cmd->client = pcmk__str_copy(crm_system_name); cmd->client_name = pcmk__str_copy(crm_system_name); cmd->internal_user_data = internal_user_data; cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */ schedule_stonith_command(cmd, device); } // Fence agent status commands use custom exit status codes enum fence_status_code { fence_status_invalid = -1, fence_status_active = 0, fence_status_unknown = 1, fence_status_inactive = 2, }; static void status_search_cb(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; fenced_device_t *dev = cmd_device(cmd); gboolean can = FALSE; free_async_command(cmd); if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); if (result->execution_status != PCMK_EXEC_DONE) { crm_warn("Assuming %s cannot fence %s " "because status could not be executed: %s%s%s%s", dev->id, search->host, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? "" : ")")); search_devices_record_result(search, dev->id, FALSE); return; } switch (result->exit_status) { case fence_status_unknown: crm_trace("%s reported it cannot fence %s", dev->id, search->host); break; case fence_status_active: case fence_status_inactive: crm_trace("%s reported it can fence %s", dev->id, search->host); can = TRUE; break; default: crm_warn("Assuming %s cannot fence %s " "(status returned unknown code %d)", dev->id, search->host, result->exit_status); break; } search_devices_record_result(search, dev->id, can); } static void dynamic_list_search_cb(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; struct device_search_s *search = cmd->internal_user_data; fenced_device_t *dev = cmd_device(cmd); gboolean can_fence = FALSE; free_async_command(cmd); /* Host/alias must be in the list output to be eligible to be fenced * * Will cause problems if down'd nodes aren't listed or (for virtual nodes) * if the guest is still listed despite being moved to another machine */ if (!dev) { search_devices_record_result(search, NULL, FALSE); return; } mainloop_set_trigger(dev->work); if (pcmk__result_ok(result)) { crm_info("Refreshing target list for %s", dev->id); g_list_free_full(dev->targets, free); dev->targets = stonith__parse_targets(result->action_stdout); dev->targets_age = time(NULL); } else if (dev->targets != NULL) { if (result->execution_status == PCMK_EXEC_DONE) { crm_info("Reusing most recent target list for %s " "because list returned error code %d", dev->id, result->exit_status); } else { crm_info("Reusing most recent target list for %s " "because list could not be executed: %s%s%s%s", dev->id, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? 
"" : ")")); } } else { // We have never successfully executed list if (result->execution_status == PCMK_EXEC_DONE) { crm_warn("Assuming %s cannot fence %s " "because list returned error code %d", dev->id, search->host, result->exit_status); } else { crm_warn("Assuming %s cannot fence %s " "because list could not be executed: %s%s%s%s", dev->id, search->host, pcmk_exec_status_str(result->execution_status), ((result->exit_reason == NULL)? "" : " ("), ((result->exit_reason == NULL)? "" : result->exit_reason), ((result->exit_reason == NULL)? "" : ")")); } /* Fall back to pcmk_host_check=PCMK_VALUE_STATUS if the user didn't * explicitly specify PCMK_VALUE_DYNAMIC_LIST */ if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_CHECK) == NULL) { crm_notice("Switching to pcmk_host_check='status' for %s", dev->id); pcmk__insert_dup(dev->params, PCMK_STONITH_HOST_CHECK, PCMK_VALUE_STATUS); } } if (dev->targets) { const char *alias = g_hash_table_lookup(dev->aliases, search->host); if (!alias) { alias = search->host; } if (pcmk__str_in_list(alias, dev->targets, pcmk__str_casei)) { can_fence = TRUE; } } search_devices_record_result(search, dev->id, can_fence); } /*! * \internal * \brief Returns true if any key in first is not in second or second has a different value for key */ static int device_params_diff(GHashTable *first, GHashTable *second) { char *key = NULL; char *value = NULL; GHashTableIter gIter; g_hash_table_iter_init(&gIter, first); while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) { if(strstr(key, "CRM_meta") == key) { continue; } else if (strcmp(key, PCMK_XA_CRM_FEATURE_SET) == 0) { continue; } else { char *other_value = g_hash_table_lookup(second, key); if (!other_value || !pcmk__str_eq(other_value, value, pcmk__str_casei)) { crm_trace("Different value for %s: %s != %s", key, other_value, value); return 1; } } } return 0; } /*! * \internal * \brief Checks to see if an identical device already exists in the table */ static fenced_device_t * device_has_duplicate(const fenced_device_t *device) { fenced_device_t *dup = g_hash_table_lookup(device_table, device->id); if (!dup) { crm_trace("No match for %s", device->id); return NULL; } else if (!pcmk__str_eq(dup->agent, device->agent, pcmk__str_casei)) { crm_trace("Different agent: %s != %s", dup->agent, device->agent); return NULL; } // Find a way to share logic with pcmk__digest_op_params() here? if (device_params_diff(device->params, dup->params) || device_params_diff(dup->params, device->params)) { return NULL; } crm_trace("Match"); return dup; } int fenced_device_register(const xmlNode *dev, bool from_cib) { const char *local_node_name = fenced_get_local_node(); fenced_device_t *dup = NULL; fenced_device_t *device = build_device_from_xml(dev); int rc = pcmk_rc_ok; CRM_CHECK(device != NULL, return ENOMEM); /* do we have a watchdog-device? 
*/ if (pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none) || pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { if (stonith_watchdog_timeout_ms <= 0) { crm_err("Ignoring watchdog fence device without " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " set"); rc = ENODEV; goto done; } if (!pcmk__str_any_of(device->agent, STONITH_WATCHDOG_AGENT, STONITH_WATCHDOG_AGENT_INTERNAL, NULL)) { crm_err("Ignoring watchdog fence device with unknown agent '%s' " "rather than '" STONITH_WATCHDOG_AGENT "'", pcmk__s(device->agent, "")); rc = ENODEV; goto done; } if (!pcmk__str_eq(device->id, STONITH_WATCHDOG_ID, pcmk__str_none)) { crm_err("Ignoring watchdog fence device named '%s' rather than " "'" STONITH_WATCHDOG_ID "'", pcmk__s(device->id, "")); rc = ENODEV; goto done; } if (pcmk__str_eq(device->agent, STONITH_WATCHDOG_AGENT, pcmk__str_none)) { /* This has either an empty list or the targets configured for * watchdog fencing */ g_list_free_full(stonith_watchdog_targets, free); stonith_watchdog_targets = device->targets; device->targets = NULL; } if (!node_does_watchdog_fencing(local_node_name)) { crm_debug("Skip registration of watchdog fence device on node not " "in host list"); device->targets = NULL; stonith_device_remove(device->id, from_cib); goto done; } // Proceed as with any other fencing device g_list_free_full(device->targets, free); device->targets = stonith__parse_targets(local_node_name); pcmk__insert_dup(device->params, PCMK_STONITH_HOST_LIST, local_node_name); } dup = device_has_duplicate(device); if (dup != NULL) { guint ndevices = g_hash_table_size(device_table); crm_debug("Device '%s' already in device list (%d active device%s)", device->id, ndevices, pcmk__plural_s(ndevices)); free_device(device); device = dup; fenced_device_clear_flags(device, fenced_df_dirty); } else { guint ndevices = 0; fenced_device_t *old = g_hash_table_lookup(device_table, device->id); if (from_cib && (old != NULL) && pcmk__is_set(old->flags, fenced_df_api_registered)) { /* If the CIB is writing over an entry that is shared with a stonith * client, copy any pending ops that currently exist on the old * entry to the new one. Otherwise the pending ops will be reported * as failures. 
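 *
 * Only the pending op list is moved: the new entry inherits the
 * API-registered flag and its work trigger is re-armed below so the moved
 * ops still run, while the old entry is replaced in the table as usual.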
*/ crm_info("Overwriting existing entry for %s from CIB", device->id); device->pending_ops = old->pending_ops; fenced_device_set_flags(device, fenced_df_api_registered); old->pending_ops = NULL; if (device->pending_ops != NULL) { mainloop_set_trigger(device->work); } } g_hash_table_replace(device_table, device->id, device); ndevices = g_hash_table_size(device_table); crm_notice("Added '%s' to device list (%d active device%s)", device->id, ndevices, pcmk__plural_s(ndevices)); } if (from_cib) { fenced_device_set_flags(device, fenced_df_cib_registered); } else { fenced_device_set_flags(device, fenced_df_api_registered); } done: if (rc != pcmk_rc_ok) { free_device(device); } return rc; } void stonith_device_remove(const char *id, bool from_cib) { fenced_device_t *device = g_hash_table_lookup(device_table, id); guint ndevices = 0; if (device == NULL) { ndevices = g_hash_table_size(device_table); crm_info("Device '%s' not found (%u active device%s)", id, ndevices, pcmk__plural_s(ndevices)); return; } if (from_cib) { fenced_device_clear_flags(device, fenced_df_cib_registered); } else { fenced_device_clear_flags(device, fenced_df_api_registered|fenced_df_verified); } if (!pcmk__any_flags_set(device->flags, fenced_df_api_registered |fenced_df_cib_registered)) { g_hash_table_remove(device_table, id); ndevices = g_hash_table_size(device_table); crm_info("Removed '%s' from device list (%u active device%s)", id, ndevices, pcmk__plural_s(ndevices)); } else { // Exactly one is true at this point const bool cib_registered = pcmk__is_set(device->flags, fenced_df_cib_registered); crm_trace("Not removing '%s' from device list (%u active) because " "still registered via %s", id, g_hash_table_size(device_table), (cib_registered? "CIB" : "API")); } } /*! * \internal * \brief Return the number of stonith levels registered for a node * * \param[in] tp Node's topology table entry * * \return Number of non-NULL levels in topology entry * \note This function is used only for log messages. */ static int count_active_levels(const stonith_topology_t *tp) { int lpc = 0; int count = 0; for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) { if (tp->levels[lpc] != NULL) { count++; } } return count; } static void free_topology_entry(gpointer data) { stonith_topology_t *tp = data; int lpc = 0; for (lpc = 0; lpc < ST__LEVEL_COUNT; lpc++) { if (tp->levels[lpc] != NULL) { g_list_free_full(tp->levels[lpc], free); } } free(tp->target); free(tp->target_value); free(tp->target_pattern); free(tp->target_attribute); free(tp); } void free_topology_list(void) { if (topology != NULL) { g_hash_table_destroy(topology); topology = NULL; } } void init_topology_list(void) { if (topology == NULL) { topology = pcmk__strkey_table(NULL, free_topology_entry); } } char * stonith_level_key(const xmlNode *level, enum fenced_target_by mode) { if (mode == fenced_target_by_unknown) { mode = unpack_level_kind(level); } switch (mode) { case fenced_target_by_name: return pcmk__xe_get_copy(level, PCMK_XA_TARGET); case fenced_target_by_pattern: return pcmk__xe_get_copy(level, PCMK_XA_TARGET_PATTERN); case fenced_target_by_attribute: return pcmk__assert_asprintf("%s=%s", pcmk__xe_get(level, PCMK_XA_TARGET_ATTRIBUTE), pcmk__xe_get(level, PCMK_XA_TARGET_VALUE)); default: return pcmk__assert_asprintf("unknown-%s", pcmk__xe_id(level)); } } /*! 
* \internal * \brief Parse target identification from topology level XML * * \param[in] level Topology level XML to parse * * \return How to identify target of \p level */ static enum fenced_target_by unpack_level_kind(const xmlNode *level) { if (pcmk__xe_get(level, PCMK_XA_TARGET) != NULL) { return fenced_target_by_name; } if (pcmk__xe_get(level, PCMK_XA_TARGET_PATTERN) != NULL) { return fenced_target_by_pattern; } if ((pcmk__xe_get(level, PCMK_XA_TARGET_ATTRIBUTE) != NULL) && (pcmk__xe_get(level, PCMK_XA_TARGET_VALUE) != NULL)) { return fenced_target_by_attribute; } return fenced_target_by_unknown; } /*! * \internal * \brief Unpack essential information from topology request XML * * \param[in] xml Request XML to search * \param[out] mode If not NULL, where to store level kind * \param[out] target If not NULL, where to store representation of target * \param[out] id If not NULL, where to store level number * * \return Topology level XML from within \p xml, or NULL if not found * \note The caller is responsible for freeing \p *target if set. */ static xmlNode * unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target, int *id) { enum fenced_target_by local_mode = fenced_target_by_unknown; char *local_target = NULL; int local_id = 0; /* The level element can be the top element or lower. If top level, don't * search by xpath, because it might give multiple hits if the XML is the * CIB. */ if ((xml != NULL) && !pcmk__xe_is(xml, PCMK_XE_FENCING_LEVEL)) { xml = pcmk__xpath_find_one(xml->doc, "//" PCMK_XE_FENCING_LEVEL, LOG_WARNING); } if (xml != NULL) { local_mode = unpack_level_kind(xml); local_target = stonith_level_key(xml, local_mode); pcmk__xe_get_int(xml, PCMK_XA_INDEX, &local_id); } if (mode != NULL) { *mode = local_mode; } if (id != NULL) { *id = local_id; } if (target != NULL) { *target = local_target; } else { free(local_target); } return xml; } /*! * \internal * \brief Register a fencing topology level for a target * * Given an XML request specifying the target name, level index, and device IDs * for the level, this will create an entry for the target in the global topology * table if one does not already exist, then append the specified device IDs to * the entry's device list for the specified level. 
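 *
 * For illustration only (attribute names per the Pacemaker CIB schema), the
 * level element in such a request looks roughly like
 *   <fencing-level id="fl-node1-1" target="node1" index="1"
 *                  devices="fence_ipmi1,fence_ipmi2"/>
 * where index is the level number and devices is a comma-separated list of
 * device IDs.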
* * \param[in] msg XML request for STONITH level registration * \param[out] result Where to set result of registration (can be \c NULL) */ void fenced_register_level(xmlNode *msg, pcmk__action_result_t *result) { int id = 0; xmlNode *level; enum fenced_target_by mode; char *target; stonith_topology_t *tp; const char *value = NULL; CRM_CHECK(msg != NULL, return); level = unpack_level_request(msg, &mode, &target, &id); if (level == NULL) { set_bad_request_result(result); return; } // Ensure an ID was given (even the client API adds an ID) if (pcmk__str_empty(pcmk__xe_id(level))) { crm_warn("Ignoring registration for topology level without ID"); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Topology level is invalid without ID"); return; } // Ensure a valid target was specified if (mode == fenced_target_by_unknown) { crm_warn("Ignoring registration for topology level '%s' " "without valid target", pcmk__xe_id(level)); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid target for topology level '%s'", pcmk__xe_id(level)); return; } // Ensure level ID is in allowed range if ((id < ST__LEVEL_MIN) || (id > ST__LEVEL_MAX)) { crm_warn("Ignoring topology registration for %s with invalid level %d", target, id); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid level number '%s' for topology level '%s'", pcmk__s(pcmk__xe_get(level, PCMK_XA_INDEX), ""), pcmk__xe_id(level)); return; } /* Find or create topology table entry */ tp = g_hash_table_lookup(topology, target); if (tp == NULL) { tp = pcmk__assert_alloc(1, sizeof(stonith_topology_t)); tp->kind = mode; tp->target = target; tp->target_value = pcmk__xe_get_copy(level, PCMK_XA_TARGET_VALUE); tp->target_pattern = pcmk__xe_get_copy(level, PCMK_XA_TARGET_PATTERN); tp->target_attribute = pcmk__xe_get_copy(level, PCMK_XA_TARGET_ATTRIBUTE); g_hash_table_replace(topology, tp->target, tp); crm_trace("Added %s (%d) to the topology (%d active entries)", target, (int) mode, g_hash_table_size(topology)); } else { free(target); } if (tp->levels[id] != NULL) { crm_info("Adding to the existing %s[%d] topology entry", tp->target, id); } value = pcmk__xe_get(level, PCMK_XA_DEVICES); if (value != NULL) { /* Empty string and whitespace are not possible with schema validation * enabled. Don't bother handling them specially here. */ gchar **devices = g_strsplit(value, ",", 0); for (char **dev = devices; (dev != NULL) && (*dev != NULL); dev++) { crm_trace("Adding device '%s' for %s[%d]", *dev, tp->target, id); tp->levels[id] = g_list_append(tp->levels[id], pcmk__str_copy(*dev)); } g_strfreev(devices); } { int nlevels = count_active_levels(tp); crm_info("Target %s has %d active fencing level%s", tp->target, nlevels, pcmk__plural_s(nlevels)); } pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } /*! * \internal * \brief Unregister a fencing topology level for a target * * Given an XML request specifying the target name and level index (or 0 for all * levels), this will remove any corresponding entry for the target from the * global topology table. 
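 *
 * For example (illustrative values), a request for target "node1" with
 * index 2 clears only level 2 for node1, while index 0 removes node1's
 * topology entry entirely.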
* * \param[in] msg XML request for STONITH level registration * \param[out] result Where to set result of unregistration (can be \c NULL) */ void fenced_unregister_level(xmlNode *msg, pcmk__action_result_t *result) { int id = -1; stonith_topology_t *tp; char *target; xmlNode *level = NULL; level = unpack_level_request(msg, NULL, &target, &id); if (level == NULL) { set_bad_request_result(result); return; } // Ensure level ID is in allowed range if ((id < 0) || (id >= ST__LEVEL_COUNT)) { crm_warn("Ignoring topology unregistration for %s with invalid level %d", target, id); free(target); crm_log_xml_trace(level, "Bad level"); pcmk__format_result(result, CRM_EX_INVALID_PARAM, PCMK_EXEC_INVALID, "Invalid level number '%s' for topology level %s", pcmk__s(pcmk__xe_get(level, PCMK_XA_INDEX), ""), // Client API doesn't add ID to unregistration XML pcmk__s(pcmk__xe_id(level), "")); return; } tp = g_hash_table_lookup(topology, target); if (tp == NULL) { guint nentries = g_hash_table_size(topology); crm_info("No fencing topology found for %s (%d active %s)", target, nentries, pcmk__plural_alt(nentries, "entry", "entries")); } else if (id == 0 && g_hash_table_remove(topology, target)) { guint nentries = g_hash_table_size(topology); crm_info("Removed all fencing topology entries related to %s " "(%d active %s remaining)", target, nentries, pcmk__plural_alt(nentries, "entry", "entries")); } else if (tp->levels[id] != NULL) { guint nlevels; g_list_free_full(tp->levels[id], free); tp->levels[id] = NULL; nlevels = count_active_levels(tp); crm_info("Removed level %d from fencing topology for %s " "(%d active level%s remaining)", id, target, nlevels, pcmk__plural_s(nlevels)); } free(target); pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } static char * list_to_string(GList *list, const char *delim, gboolean terminate_with_delim) { int max = g_list_length(list); size_t delim_len = delim?strlen(delim):0; size_t alloc_size = 1 + (max?((max-1+(terminate_with_delim?1:0))*delim_len):0); char *rv; GList *gIter; char *pos = NULL; const char *lead_delim = ""; for (gIter = list; gIter != NULL; gIter = gIter->next) { const char *value = (const char *) gIter->data; alloc_size += strlen(value); } rv = pcmk__assert_alloc(alloc_size, sizeof(char)); pos = rv; for (gIter = list; gIter != NULL; gIter = gIter->next) { const char *value = (const char *) gIter->data; pos = &pos[sprintf(pos, "%s%s", lead_delim, value)]; lead_delim = delim; } if (max && terminate_with_delim) { sprintf(pos, "%s", delim); } return rv; } /*! * \internal * \brief Execute a fence agent action directly (and asynchronously) * * Handle a STONITH_OP_EXEC API message by scheduling a requested agent action * directly on a specified device. Only list, monitor, and status actions are * expected to use this call, though it should work with any agent command. * * \param[in] msg Request XML specifying action * \param[out] result Where to store result of action * * \note If the action is monitor, the device must be registered via the API * (CIB registration is not sufficient), because monitor should not be * possible unless the device is "started" (API registered). 
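 *
 * \note On success this only schedules the action: the result is set to
 *       PCMK_EXEC_PENDING here, and the agent's actual outcome is reported
 *       later through the scheduled command's callback.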
*/ static void execute_agent_action(xmlNode *msg, pcmk__action_result_t *result) { xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//" PCMK__XE_ST_DEVICE_ID, LOG_ERR); xmlNode *op = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_ERR); const char *id = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); const char *action = pcmk__xe_get(op, PCMK__XA_ST_DEVICE_ACTION); async_command_t *cmd = NULL; fenced_device_t *device = NULL; if ((id == NULL) || (action == NULL)) { crm_info("Malformed API action request: device %s, action %s", (id? id : "not specified"), (action? action : "not specified")); set_bad_request_result(result); return; } if (pcmk__str_eq(id, STONITH_WATCHDOG_ID, pcmk__str_none)) { // Watchdog agent actions are implemented internally if (stonith_watchdog_timeout_ms <= 0) { pcmk__set_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Watchdog fence device not configured"); return; } else if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_result_output(result, list_to_string(stonith_watchdog_targets, "\n", TRUE), NULL); return; } else if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_none)) { pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return; } } device = g_hash_table_lookup(device_table, id); if (device == NULL) { crm_info("Ignoring API '%s' action request because device %s not found", action, id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "'%s' not found", id); return; } else if (!pcmk__is_set(device->flags, fenced_df_api_registered) && (strcmp(action, PCMK_ACTION_MONITOR) == 0)) { // Monitors may run only on "started" (API-registered) devices crm_info("Ignoring API '%s' action request because device %s not active", action, id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "'%s' not active", id); return; } cmd = create_async_command(msg); if (cmd == NULL) { crm_log_xml_warn(msg, "invalid"); set_bad_request_result(result); return; } schedule_stonith_command(cmd, device); pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } static void search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence) { search->replies_received++; if (can_fence && device) { if (search->support_action_only != fenced_df_none) { fenced_device_t *dev = g_hash_table_lookup(device_table, device); if (dev && !pcmk__is_set(dev->flags, search->support_action_only)) { return; } } search->capable = g_list_append(search->capable, pcmk__str_copy(device)); } if (search->replies_needed == search->replies_received) { guint ndevices = g_list_length(search->capable); crm_debug("Search found %d device%s that can perform '%s' targeting %s", ndevices, pcmk__plural_s(ndevices), (search->action? search->action : "unknown action"), (search->host? search->host : "any node")); search->callback(search->capable, search->user_data); free(search->host); free(search->action); free(search); } } /*! 
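 * (The on_target_actions string checked here is built from agent metadata in
 * read_action_metadata(); the actions listed in it must run on the node
 * being fenced itself.)
 *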
* \internal * \brief Check whether the local host is allowed to execute a fencing action * * \param[in] device Fence device to check * \param[in] action Fence action to check * \param[in] target Hostname of fence target * \param[in] allow_self Whether self-fencing is allowed for this operation * * \return TRUE if local host is allowed to execute action, FALSE otherwise */ static gboolean localhost_is_eligible(const fenced_device_t *device, const char *action, const char *target, gboolean allow_self) { gboolean localhost_is_target = pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei); if ((device != NULL) && (action != NULL) && (device->on_target_actions != NULL) && (strstr((const char*) device->on_target_actions->str, action) != NULL)) { if (!localhost_is_target) { crm_trace("Operation '%s' using %s can only be executed for local " "host, not %s", action, device->id, target); return FALSE; } } else if (localhost_is_target && !allow_self) { crm_trace("'%s' operation does not support self-fencing", action); return FALSE; } return TRUE; } /*! * \internal * \brief Check if local node is allowed to execute (possibly remapped) action * * \param[in] device Fence device to check * \param[in] action Fence action to check * \param[in] target Node name of fence target * \param[in] allow_self Whether self-fencing is allowed for this operation * * \return true if local node is allowed to execute \p action or any actions it * might be remapped to, otherwise false */ static bool localhost_is_eligible_with_remap(const fenced_device_t *device, const char *action, const char *target, gboolean allow_self) { // Check exact action if (localhost_is_eligible(device, action, target, allow_self)) { return true; } // Check potential remaps if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* "reboot" might get remapped to "off" then "on", so even if reboot is * disallowed, return true if either of those is allowed. We'll report * the disallowed actions with the results. We never allow self-fencing * for remapped "on" actions because the target is off at that point. */ if (localhost_is_eligible(device, PCMK_ACTION_OFF, target, allow_self) || localhost_is_eligible(device, PCMK_ACTION_ON, target, FALSE)) { return true; } } return false; } /*! * \internal * \brief Check whether we can use a device's cached target list * * \param[in] dev Fencing device to check * * \return \c true if \p dev cached its targets less than a minute ago, * otherwise \c false */ static inline bool can_use_target_cache(const fenced_device_t *dev) { return (dev->targets != NULL) && (time(NULL) < (dev->targets_age + 60)); } static void can_fence_host_with_device(fenced_device_t *dev, struct device_search_s *search) { gboolean can = FALSE; const char *check_type = "Internal bug"; const char *target = NULL; const char *alias = NULL; const char *dev_id = "Unspecified device"; const char *action = (search == NULL)? 
NULL : search->action; CRM_CHECK((dev != NULL) && (action != NULL), goto search_report_results); if (dev->id != NULL) { dev_id = dev->id; } target = search->host; if (target == NULL) { can = TRUE; check_type = "No target"; goto search_report_results; } /* Answer immediately if the device does not support the action * or the local node is not allowed to perform it */ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none) && !pcmk__is_set(dev->flags, fenced_df_supports_on)) { check_type = "Agent does not support 'on'"; goto search_report_results; } else if (!localhost_is_eligible_with_remap(dev, action, target, search->allow_self)) { check_type = "This node is not allowed to execute action"; goto search_report_results; } // Check eligibility as specified by pcmk_host_check check_type = target_list_type(dev); alias = g_hash_table_lookup(dev->aliases, target); if (pcmk__str_eq(check_type, PCMK_VALUE_NONE, pcmk__str_casei)) { can = TRUE; } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATIC_LIST, pcmk__str_casei)) { if (pcmk__str_in_list(target, dev->targets, pcmk__str_casei)) { can = TRUE; } else if (g_hash_table_lookup(dev->params, PCMK_STONITH_HOST_MAP) && g_hash_table_lookup(dev->aliases, target)) { can = TRUE; } } else if (pcmk__str_eq(check_type, PCMK_VALUE_DYNAMIC_LIST, pcmk__str_casei)) { if (!can_use_target_cache(dev)) { int device_timeout = get_action_timeout(dev, PCMK_ACTION_LIST, search->per_device_timeout); if (device_timeout > search->per_device_timeout) { crm_notice("Since the pcmk_list_timeout (%ds) parameter of %s " "is larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), timeout may occur", device_timeout, dev_id, search->per_device_timeout); } crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); schedule_internal_command(__func__, dev, PCMK_ACTION_LIST, NULL, search->per_device_timeout, search, dynamic_list_search_cb); /* we'll respond to this search request async in the cb */ return; } if (pcmk__str_in_list(((alias == NULL)? target : alias), dev->targets, pcmk__str_casei)) { can = TRUE; } } else if (pcmk__str_eq(check_type, PCMK_VALUE_STATUS, pcmk__str_casei)) { int device_timeout = get_action_timeout(dev, check_type, search->per_device_timeout); if (device_timeout > search->per_device_timeout) { crm_notice("Since the pcmk_status_timeout (%ds) parameter of %s is " "larger than " PCMK_OPT_STONITH_TIMEOUT " (%ds), " "timeout may occur", device_timeout, dev_id, search->per_device_timeout); } crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)", check_type, dev_id, target, action); schedule_internal_command(__func__, dev, PCMK_ACTION_STATUS, target, search->per_device_timeout, search, status_search_cb); /* we'll respond to this search request async in the cb */ return; } else { crm_err("Invalid value for " PCMK_STONITH_HOST_CHECK ": %s", check_type); check_type = "Invalid " PCMK_STONITH_HOST_CHECK; } search_report_results: crm_info("%s is%s eligible to fence (%s) %s%s%s%s: %s", dev_id, (can? "" : " not"), pcmk__s(action, "unspecified action"), pcmk__s(target, "unspecified target"), (alias == NULL)? "" : " (as '", pcmk__s(alias, ""), (alias == NULL)? "" : "')", check_type); search_devices_record_result(search, ((dev == NULL)? 
NULL : dev_id), can); } static void search_devices(gpointer key, gpointer value, gpointer user_data) { fenced_device_t *dev = value; struct device_search_s *search = user_data; can_fence_host_with_device(dev, search); } #define DEFAULT_QUERY_TIMEOUT 20 static void get_capable_devices(const char *host, const char *action, int timeout, bool allow_self, void *user_data, void (*callback) (GList * devices, void *user_data), uint32_t support_action_only) { struct device_search_s *search; guint ndevices = g_hash_table_size(device_table); if (ndevices == 0) { callback(NULL, user_data); return; } search = pcmk__assert_alloc(1, sizeof(struct device_search_s)); search->host = pcmk__str_copy(host); search->action = pcmk__str_copy(action); search->per_device_timeout = timeout; search->allow_self = allow_self; search->callback = callback; search->user_data = user_data; search->support_action_only = support_action_only; /* We are guaranteed this many replies, even if a device is * unregistered while the search is in progress. */ search->replies_needed = ndevices; crm_debug("Searching %d device%s to see which can execute '%s' targeting %s", ndevices, pcmk__plural_s(ndevices), (search->action? search->action : "unknown action"), (search->host? search->host : "any node")); fenced_foreach_device(search_devices, search); } struct st_query_data { xmlNode *reply; char *remote_peer; char *client_id; char *target; char *action; int call_options; }; /*! * \internal * \brief Add action-specific attributes to query reply XML * * \param[in,out] xml XML to add attributes to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target */ static void add_action_specific_attributes(xmlNode *xml, const char *action, const fenced_device_t *device, const char *target) { int action_specific_timeout; int delay_max; int delay_base; CRM_CHECK(xml && action && device, return); // PCMK__XA_ST_REQUIRED is currently used only for unfencing if (is_action_required(action, device)) { crm_trace("Action '%s' is required using %s", action, device->id); pcmk__xe_set_int(xml, PCMK__XA_ST_REQUIRED, 1); } // pcmk__timeout if configured action_specific_timeout = get_action_timeout(device, action, 0); if (action_specific_timeout) { crm_trace("Action '%s' has timeout %ds using %s", action, action_specific_timeout, device->id); pcmk__xe_set_int(xml, PCMK__XA_ST_ACTION_TIMEOUT, action_specific_timeout); } delay_max = get_action_delay_max(device, action); if (delay_max > 0) { crm_trace("Action '%s' has maximum random delay %ds using %s", action, delay_max, device->id); pcmk__xe_set_int(xml, PCMK__XA_ST_DELAY_MAX, delay_max); } delay_base = get_action_delay_base(device, action, target); if (delay_base > 0) { pcmk__xe_set_int(xml, PCMK__XA_ST_DELAY_BASE, delay_base); } if ((delay_max > 0) && (delay_base == 0)) { crm_trace("Action '%s' has maximum random delay %ds using %s", action, delay_max, device->id); } else if ((delay_max == 0) && (delay_base > 0)) { crm_trace("Action '%s' has a static delay of %ds using %s", action, delay_base, device->id); } else if ((delay_max > 0) && (delay_base > 0)) { crm_trace("Action '%s' has a minimum delay of %ds and a randomly chosen " "maximum delay of %ds using %s", action, delay_base, delay_max, device->id); } } /*! 
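 * (The attribute is advisory: the device is still included in the query
 * reply, but marked so the querying fencer knows the local node cannot run
 * that action.)
 *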
* \internal * \brief Add "disallowed" attribute to query reply XML if appropriate * * \param[in,out] xml XML to add attribute to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target * \param[in] allow_self Whether self-fencing is allowed */ static void add_disallowed(xmlNode *xml, const char *action, const fenced_device_t *device, const char *target, gboolean allow_self) { if (!localhost_is_eligible(device, action, target, allow_self)) { crm_trace("Action '%s' using %s is disallowed for local host", action, device->id); - pcmk__xe_set_bool_attr(xml, PCMK__XA_ST_ACTION_DISALLOWED, true); + pcmk__xe_set_bool(xml, PCMK__XA_ST_ACTION_DISALLOWED, true); } } /*! * \internal * \brief Add child element with action-specific values to query reply XML * * \param[in,out] xml XML to add attribute to * \param[in] action Fence action * \param[in] device Fence device * \param[in] target Fence target * \param[in] allow_self Whether self-fencing is allowed */ static void add_action_reply(xmlNode *xml, const char *action, const fenced_device_t *device, const char *target, gboolean allow_self) { xmlNode *child = pcmk__xe_create(xml, PCMK__XE_ST_DEVICE_ACTION); pcmk__xe_set(child, PCMK_XA_ID, action); add_action_specific_attributes(child, action, device, target); add_disallowed(child, action, device, target, allow_self); } /*! * \internal * \brief Send a reply to a CPG peer or IPC client * * \param[in] reply XML reply to send * \param[in] call_options Send synchronously if st_opt_sync_call is set * \param[in] remote_peer If not NULL, name of peer node to send CPG reply * \param[in,out] client If not NULL, client to send IPC reply */ static void stonith_send_reply(const xmlNode *reply, int call_options, const char *remote_peer, pcmk__client_t *client) { CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)), return); if (remote_peer == NULL) { do_local_reply(reply, client, call_options); } else { const pcmk__node_status_t *node = pcmk__get_node(0, remote_peer, NULL, pcmk__node_search_cluster_member); pcmk__cluster_send_message(node, pcmk_ipc_fenced, reply); } } static void stonith_query_capable_device_cb(GList * devices, void *user_data) { struct st_query_data *query = user_data; int available_devices = 0; xmlNode *wrapper = NULL; xmlNode *list = NULL; GList *lpc = NULL; pcmk__client_t *client = NULL; if (query->client_id != NULL) { client = pcmk__find_client_by_id(query->client_id); if ((client == NULL) && (query->remote_peer == NULL)) { crm_trace("Skipping reply to %s: no longer a client", query->client_id); goto done; } } // Pack the results into XML wrapper = pcmk__xe_create(query->reply, PCMK__XE_ST_CALLDATA); list = pcmk__xe_create(wrapper, __func__); pcmk__xe_set(list, PCMK__XA_ST_TARGET, query->target); for (lpc = devices; lpc != NULL; lpc = lpc->next) { fenced_device_t *device = g_hash_table_lookup(device_table, lpc->data); const char *action = query->action; xmlNode *dev = NULL; if (!device) { /* It is possible the device got unregistered while * determining who can fence the target */ continue; } available_devices++; dev = pcmk__xe_create(list, PCMK__XE_ST_DEVICE_ID); pcmk__xe_set(dev, PCMK_XA_ID, device->id); pcmk__xe_set(dev, PCMK__XA_NAMESPACE, device->namespace); pcmk__xe_set(dev, PCMK_XA_AGENT, device->agent); // Has had successful monitor, list, or status on this node pcmk__xe_set_int(dev, PCMK__XA_ST_MONITOR_VERIFIED, pcmk__is_set(device->flags, fenced_df_verified)); pcmk__xe_set_int(dev, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, 
device->flags); /* If the originating fencer wants to reboot the node, and we have a * capable device that doesn't support "reboot", remap to "off" instead. */ if (!pcmk__is_set(device->flags, fenced_df_supports_reboot) && pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { crm_trace("%s doesn't support reboot, using values for off instead", device->id); action = PCMK_ACTION_OFF; } /* Add action-specific values if available */ add_action_specific_attributes(dev, action, device, query->target); if (pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A "reboot" *might* get remapped to "off" then "on", so after * sending the "reboot"-specific values in the main element, we add * sub-elements for "off" and "on" values. * * We short-circuited earlier if "reboot", "off" and "on" are all * disallowed for the local host. However if only one or two are * disallowed, we send back the results and mark which ones are * disallowed. If "reboot" is disallowed, this might cause problems * with older fencer versions, which won't check for it. Older * versions will ignore "off" and "on", so they are not a problem. */ add_disallowed(dev, action, device, query->target, pcmk__is_set(query->call_options, st_opt_allow_self_fencing)); add_action_reply(dev, PCMK_ACTION_OFF, device, query->target, pcmk__is_set(query->call_options, st_opt_allow_self_fencing)); add_action_reply(dev, PCMK_ACTION_ON, device, query->target, FALSE); } /* A query without a target wants device parameters */ if (query->target == NULL) { xmlNode *attrs = pcmk__xe_create(dev, PCMK__XE_ATTRIBUTES); g_hash_table_foreach(device->params, hash2field, attrs); } } pcmk__xe_set_int(list, PCMK__XA_ST_AVAILABLE_DEVICES, available_devices); if (query->target) { crm_debug("Found %d matching device%s for target '%s'", available_devices, pcmk__plural_s(available_devices), query->target); } else { crm_debug("%d device%s installed", available_devices, pcmk__plural_s(available_devices)); } crm_log_xml_trace(list, "query-result"); stonith_send_reply(query->reply, query->call_options, query->remote_peer, client); done: pcmk__xml_free(query->reply); free(query->remote_peer); free(query->client_id); free(query->target); free(query->action); free(query); g_list_free_full(devices, free); } /*! * \internal * \brief Log the result of an asynchronous command * * \param[in] cmd Command the result is for * \param[in] result Result of command * \param[in] pid Process ID of command, if available * \param[in] next Alternate device that will be tried if command failed * \param[in] op_merged Whether this command was merged with an earlier one */ static void log_async_result(const async_command_t *cmd, const pcmk__action_result_t *result, int pid, const char *next, bool op_merged) { int log_level = LOG_ERR; int output_log_level = LOG_NEVER; guint devices_remaining = g_list_length(cmd->next_device_iter); GString *msg = g_string_sized_new(80); // Reasonable starting size // Choose log levels appropriately if we have a result if (pcmk__result_ok(result)) { log_level = (cmd->target == NULL)? LOG_DEBUG : LOG_NOTICE; if ((result->action_stdout != NULL) && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA, pcmk__str_none)) { output_log_level = LOG_DEBUG; } next = NULL; } else { log_level = (cmd->target == NULL)? 
LOG_NOTICE : LOG_ERR; if ((result->action_stdout != NULL) && !pcmk__str_eq(cmd->action, PCMK_ACTION_METADATA, pcmk__str_none)) { output_log_level = LOG_WARNING; } } // Build the log message piece by piece pcmk__g_strcat(msg, "Operation '", cmd->action, "' ", NULL); if (pid != 0) { g_string_append_printf(msg, "[%d] ", pid); } if (cmd->target != NULL) { pcmk__g_strcat(msg, "targeting ", cmd->target, " ", NULL); } if (cmd->device != NULL) { pcmk__g_strcat(msg, "using ", cmd->device, " ", NULL); } // Add exit status or execution status as appropriate if (result->execution_status == PCMK_EXEC_DONE) { g_string_append_printf(msg, "returned %d", result->exit_status); } else { pcmk__g_strcat(msg, "could not be executed: ", pcmk_exec_status_str(result->execution_status), NULL); } // Add exit reason and next device if appropriate if (result->exit_reason != NULL) { pcmk__g_strcat(msg, " (", result->exit_reason, ")", NULL); } if (next != NULL) { pcmk__g_strcat(msg, ", retrying with ", next, NULL); } if (devices_remaining > 0) { g_string_append_printf(msg, " (%u device%s remaining)", (unsigned int) devices_remaining, pcmk__plural_s(devices_remaining)); } g_string_append_printf(msg, " " QB_XS " %scall %d from %s", (op_merged? "merged " : ""), cmd->id, cmd->client_name); // Log the result do_crm_log(log_level, "%s", msg->str); g_string_free(msg, TRUE); // Log the output (which may have multiple lines), if appropriate if (output_log_level != LOG_NEVER) { char *prefix = pcmk__assert_asprintf("%s[%d]", cmd->device, pid); crm_log_output(output_log_level, prefix, result->action_stdout); free(prefix); } } /*! * \internal * \brief Reply to requester after asynchronous command completion * * \param[in] cmd Command that completed * \param[in] result Result of command * \param[in] pid Process ID of command, if available * \param[in] merged If true, command was merged with another, not executed */ static void send_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result, int pid, bool merged) { xmlNode *reply = NULL; pcmk__client_t *client = NULL; CRM_CHECK((cmd != NULL) && (result != NULL), return); log_async_result(cmd, result, pid, NULL, merged); if (cmd->client != NULL) { client = pcmk__find_client_by_id(cmd->client); if ((client == NULL) && (cmd->origin == NULL)) { crm_trace("Skipping reply to %s: no longer a client", cmd->client); return; } } reply = construct_async_reply(cmd, result); if (merged) { - pcmk__xe_set_bool_attr(reply, PCMK__XA_ST_OP_MERGED, true); + pcmk__xe_set_bool(reply, PCMK__XA_ST_OP_MERGED, true); } if (pcmk__is_fencing_action(cmd->action) && pcmk__str_eq(cmd->origin, cmd->target, pcmk__str_casei)) { /* The target was also the originator, so broadcast the result on its * behalf (since it will be unable to). */ crm_trace("Broadcast '%s' result for %s (target was also originator)", cmd->action, cmd->target); pcmk__xe_set(reply, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); pcmk__xe_set(reply, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, reply); } else { // Reply only to the originator stonith_send_reply(reply, cmd->options, cmd->origin, client); } crm_log_xml_trace(reply, "Reply"); pcmk__xml_free(reply); } static void cancel_stonith_command(async_command_t * cmd) { fenced_device_t *device = cmd_device(cmd); if (device) { crm_trace("Cancel scheduled '%s' action using %s", cmd->action, device->id); device->pending_ops = g_list_remove(device->pending_ops, cmd); } } /*! 
* \internal * \brief Cancel and reply to any duplicates of a just-completed operation * * Check whether any fencing operations are scheduled to do the same thing as * one that just succeeded. If so, rather than performing the same operation * twice, return the result of this operation for all matching pending commands. * * \param[in,out] cmd Fencing operation that just succeeded * \param[in] result Result of \p cmd * \param[in] pid If nonzero, process ID of agent invocation (for logs) * * \note Duplicate merging will do the right thing for either type of remapped * reboot. If the executing fencer remapped an unsupported reboot to off, * then cmd->action will be "reboot" and will be merged with any other * reboot requests. If the originating fencer remapped a topology reboot * to off then on, we will get here once with cmd->action "off" and once * with "on", and they will be merged separately with similar requests. */ static void reply_to_duplicates(async_command_t *cmd, const pcmk__action_result_t *result, int pid) { GList *next = NULL; for (GList *iter = cmd_list; iter != NULL; iter = next) { async_command_t *cmd_other = iter->data; next = iter->next; // We might delete this entry, so grab next now if (cmd == cmd_other) { continue; } /* A pending operation matches if: * 1. The client connections are different. * 2. The target is the same. * 3. The fencing action is the same. * 4. The device scheduled to execute the action is the same. */ if (pcmk__str_eq(cmd->client, cmd_other->client, pcmk__str_casei) || !pcmk__str_eq(cmd->target, cmd_other->target, pcmk__str_casei) || !pcmk__str_eq(cmd->action, cmd_other->action, pcmk__str_none) || !pcmk__str_eq(cmd->device, cmd_other->device, pcmk__str_casei)) { continue; } crm_notice("Merging fencing action '%s'%s%s originating from " "client %s with identical fencing request from client %s", cmd_other->action, (cmd_other->target == NULL)? "" : " targeting ", pcmk__s(cmd_other->target, ""), cmd_other->client_name, cmd->client_name); // Stop tracking the duplicate, send its result, and cancel it cmd_list = g_list_remove_link(cmd_list, iter); send_async_reply(cmd_other, result, pid, true); cancel_stonith_command(cmd_other); free_async_command(cmd_other); g_list_free_1(iter); } } /*! * \internal * \brief Return the next required device (if any) for an operation * * \param[in,out] cmd Fencing operation that just succeeded * * \return Next device required for action if any, otherwise NULL */ static fenced_device_t * next_required_device(async_command_t *cmd) { for (GList *iter = cmd->next_device_iter; iter != NULL; iter = iter->next) { fenced_device_t *next_device = g_hash_table_lookup(device_table, iter->data); if (is_action_required(cmd->action, next_device)) { /* This is only called for successful actions, so it's OK to skip * non-required devices. 
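 *
 * (A device is "required" when it is flagged for automatic unfencing and
 * the action is "on"; see is_action_required().)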
*/ cmd->next_device_iter = iter->next; return next_device; } } return NULL; } static void st_child_done(int pid, const pcmk__action_result_t *result, void *user_data) { async_command_t *cmd = user_data; fenced_device_t *device = NULL; fenced_device_t *next_device = NULL; CRM_CHECK(cmd != NULL, return); device = cmd_device(cmd); cmd->active_on = NULL; /* The device is ready to do something else now */ if (device) { if (!pcmk__is_set(device->flags, fenced_df_verified) && pcmk__result_ok(result) && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_LIST, PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS, NULL)) { fenced_device_set_flags(device, fenced_df_verified); } mainloop_set_trigger(device->work); } if (pcmk__result_ok(result)) { next_device = next_required_device(cmd); } else if ((cmd->next_device_iter != NULL) && !is_action_required(cmd->action, device)) { /* if this device didn't work out, see if there are any others we can try. * if the failed device was 'required', we can't pick another device. */ next_device = g_hash_table_lookup(device_table, cmd->next_device_iter->data); cmd->next_device_iter = cmd->next_device_iter->next; } if (next_device == NULL) { send_async_reply(cmd, result, pid, false); if (pcmk__result_ok(result)) { reply_to_duplicates(cmd, result, pid); } free_async_command(cmd); } else { // This operation requires more fencing log_async_result(cmd, result, pid, next_device->id, false); schedule_stonith_command(cmd, next_device); } } static void stonith_fence_get_devices_cb(GList * devices, void *user_data) { async_command_t *cmd = user_data; fenced_device_t *device = NULL; guint ndevices = g_list_length(devices); crm_info("Found %d matching device%s for target '%s'", ndevices, pcmk__plural_s(ndevices), cmd->target); if (devices != NULL) { device = g_hash_table_lookup(device_table, devices->data); } if (device == NULL) { // No device found pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; pcmk__format_result(&result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "No device configured for target '%s'", cmd->target); send_async_reply(cmd, &result, 0, false); pcmk__reset_result(&result); free_async_command(cmd); g_list_free_full(devices, free); } else { /* Device found. Schedule a fencing command for it. * * Assign devices to device_list so that it will be freed with cmd. */ cmd->device_list = devices; cmd->next_device_iter = devices->next; schedule_stonith_command(cmd, device); } } /*! 
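 * (If the request names a specific device, the command is scheduled on it
 * directly; otherwise get_capable_devices() searches for candidates. Either
 * way the immediate result is PCMK_EXEC_PENDING, and the real outcome is
 * reported asynchronously via send_async_reply().)
 *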
* \internal * \brief Execute a fence action via the local node * * \param[in] msg Fencing request * \param[out] result Where to store result of fence action */ static void fence_locally(xmlNode *msg, pcmk__action_result_t *result) { const char *device_id = NULL; fenced_device_t *device = NULL; async_command_t *cmd = NULL; xmlNode *dev = NULL; CRM_CHECK((msg != NULL) && (result != NULL), return); dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_ERR); cmd = create_async_command(msg); if (cmd == NULL) { crm_log_xml_warn(msg, "invalid"); set_bad_request_result(result); return; } device_id = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (device_id != NULL) { device = g_hash_table_lookup(device_table, device_id); if (device == NULL) { crm_err("Requested device '%s' is not available", device_id); pcmk__format_result(result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, "Requested device '%s' not found", device_id); return; } schedule_stonith_command(cmd, device); } else { const char *host = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); if (pcmk__is_set(cmd->options, st_opt_cs_nodeid)) { int nodeid = 0; pcmk__node_status_t *node = NULL; pcmk__scan_min_int(host, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); if (node != NULL) { host = node->name; } } /* If we get to here, then self-fencing is implicitly allowed */ get_capable_devices(host, cmd->action, cmd->default_timeout, TRUE, cmd, stonith_fence_get_devices_cb, fenced_support_flag(cmd->action)); } pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } /*! * \internal * \brief Build an XML reply for a fencing operation * * \param[in] request Request that reply is for * \param[in] data If not NULL, add to reply as call data * \param[in] result Full result of fencing operation * * \return Newly created XML reply * \note The caller is responsible for freeing the result. * \note This has some overlap with construct_async_reply(), but that copies * values from an async_command_t, whereas this one copies them from the * request. */ xmlNode * fenced_construct_reply(const xmlNode *request, xmlNode *data, const pcmk__action_result_t *result) { xmlNode *reply = NULL; reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); pcmk__xe_set(reply, PCMK__XA_ST_ORIGIN, __func__); pcmk__xe_set(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG); stonith__xe_set_result(reply, result); if (request == NULL) { /* Most likely, this is the result of a stonith operation that was * initiated before we came up. Unfortunately that means we lack enough * information to provide clients with a full result. * * @TODO Maybe synchronize this information at start-up? */ crm_warn("Missing request information for client notifications for " "operation with result '%s' (initiated before we came up?)", pcmk_exec_status_str(result->execution_status)); } else { const char *name = NULL; const char *value = NULL; // Attributes to copy from request to reply const char *names[] = { PCMK__XA_ST_OP, PCMK__XA_ST_CALLID, PCMK__XA_ST_CLIENTID, PCMK__XA_ST_CLIENTNAME, PCMK__XA_ST_REMOTE_OP, PCMK__XA_ST_CALLOPT, }; for (int lpc = 0; lpc < PCMK__NELEM(names); lpc++) { name = names[lpc]; value = pcmk__xe_get(request, name); pcmk__xe_set(reply, name, value); } if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(reply, PCMK__XE_ST_CALLDATA); pcmk__xml_copy(wrapper, data); } } return reply; } /*! 
* \internal * \brief Build an XML reply to an asynchronous fencing command * * \param[in] cmd Fencing command that reply is for * \param[in] result Command result */ static xmlNode * construct_async_reply(const async_command_t *cmd, const pcmk__action_result_t *result) { xmlNode *reply = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); pcmk__xe_set(reply, PCMK__XA_ST_ORIGIN, __func__); pcmk__xe_set(reply, PCMK__XA_T, PCMK__VALUE_STONITH_NG); pcmk__xe_set(reply, PCMK__XA_ST_OP, cmd->op); pcmk__xe_set(reply, PCMK__XA_ST_DEVICE_ID, cmd->device); pcmk__xe_set(reply, PCMK__XA_ST_REMOTE_OP, cmd->remote_op_id); pcmk__xe_set(reply, PCMK__XA_ST_CLIENTID, cmd->client); pcmk__xe_set(reply, PCMK__XA_ST_CLIENTNAME, cmd->client_name); pcmk__xe_set(reply, PCMK__XA_ST_TARGET, cmd->target); pcmk__xe_set(reply, PCMK__XA_ST_DEVICE_ACTION, cmd->op); pcmk__xe_set(reply, PCMK__XA_ST_ORIGIN, cmd->origin); pcmk__xe_set_int(reply, PCMK__XA_ST_CALLID, cmd->id); pcmk__xe_set_int(reply, PCMK__XA_ST_CALLOPT, cmd->options); stonith__xe_set_result(reply, result); return reply; } bool fencing_peer_active(pcmk__node_status_t *peer) { return (peer != NULL) && (peer->name != NULL) && pcmk__is_set(peer->processes, crm_get_cluster_proc()); } void set_fencing_completed(remote_fencing_op_t *op) { struct timespec tv; qb_util_timespec_from_epoch_get(&tv); op->completed = tv.tv_sec; op->completed_nsec = tv.tv_nsec; } /*! * \internal * \brief Look for alternate node needed if local node shouldn't fence target * * \param[in] target Node that must be fenced * * \return Name of an alternate node that should fence \p target if any, * or NULL otherwise */ static const char * check_alternate_host(const char *target) { if (pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) { GHashTableIter gIter; pcmk__node_status_t *entry = NULL; g_hash_table_iter_init(&gIter, pcmk__peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { if (fencing_peer_active(entry) && !pcmk__str_eq(entry->name, target, pcmk__str_casei)) { crm_notice("Forwarding self-fencing request to %s", entry->name); return entry->name; } } crm_warn("Will handle own fencing because no peer can"); } return NULL; } static void remove_relay_op(xmlNode * request) { xmlNode *dev = pcmk__xpath_find_one(request->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_TRACE); const char *relay_op_id = NULL; const char *op_id = NULL; const char *client_name = NULL; const char *target = NULL; remote_fencing_op_t *relay_op = NULL; if (dev) { target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); } relay_op_id = pcmk__xe_get(request, PCMK__XA_ST_REMOTE_OP_RELAY); op_id = pcmk__xe_get(request, PCMK__XA_ST_REMOTE_OP); client_name = pcmk__xe_get(request, PCMK__XA_ST_CLIENTNAME); /* Delete RELAY operation. */ if ((relay_op_id != NULL) && (target != NULL) && pcmk__str_eq(target, fenced_get_local_node(), pcmk__str_casei)) { relay_op = g_hash_table_lookup(stonith_remote_op_list, relay_op_id); if (relay_op) { GHashTableIter iter; remote_fencing_op_t *list_op = NULL; g_hash_table_iter_init(&iter, stonith_remote_op_list); /* If the operation to be deleted is registered as a duplicate, delete the registration. 
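check_alternate_host() above scans the cluster peer cache for any active peer other than the target, so that a self-fencing request can be delegated. Reduced to the bare GHashTableIter idiom, with an invented demo_peer_t in place of pcmk__node_status_t and its process check, the selection loop is roughly:

#include <glib.h>
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>

/* Hypothetical peer record; the fencer's peer cache entries carry much more */
typedef struct {
    const char *name;
    bool active;
} demo_peer_t;

/* Return the name of any active peer other than target, or NULL if none */
static const char *
pick_alternate(GHashTable *peers, const char *target)
{
    GHashTableIter iter;
    gpointer value = NULL;

    g_hash_table_iter_init(&iter, peers);
    while (g_hash_table_iter_next(&iter, NULL, &value)) {
        demo_peer_t *peer = value;

        if (peer->active && (strcasecmp(peer->name, target) != 0)) {
            return peer->name;
        }
    }
    return NULL;  // Caller has to handle its own fencing
}

int
main(void)
{
    GHashTable *peers = g_hash_table_new(g_str_hash, g_str_equal);
    demo_peer_t node1 = { "node1", true };
    demo_peer_t node2 = { "node2", true };
    const char *delegate = NULL;

    g_hash_table_insert(peers, "node1", &node1);
    g_hash_table_insert(peers, "node2", &node2);

    delegate = pick_alternate(peers, "node1");
    printf("forward self-fencing of node1 to %s\n",
           (delegate == NULL)? "nobody (handle it ourselves)" : delegate);

    g_hash_table_destroy(peers);
    return 0;
}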
*/ while (g_hash_table_iter_next(&iter, NULL, (void **)&list_op)) { GList *dup_iter = NULL; if (list_op != relay_op) { for (dup_iter = list_op->duplicates; dup_iter != NULL; dup_iter = dup_iter->next) { remote_fencing_op_t *other = dup_iter->data; if (other == relay_op) { other->duplicates = g_list_remove(other->duplicates, relay_op); break; } } } } crm_debug("Deleting relay op %s ('%s'%s%s for %s), " "replaced by op %s ('%s'%s%s for %s)", relay_op->id, relay_op->action, (relay_op->target == NULL)? "" : " targeting ", pcmk__s(relay_op->target, ""), relay_op->client_name, op_id, relay_op->action, (target == NULL)? "" : " targeting ", pcmk__s(target, ""), client_name); g_hash_table_remove(stonith_remote_op_list, relay_op_id); } } } /*! * \internal * \brief Check whether an API request was sent by a privileged user * * API commands related to fencing configuration may be done only by privileged * IPC users (i.e. root or hacluster), because all other users should go through * the CIB to have ACLs applied. If no client was given, this is a peer request, * which is always allowed. * * \param[in] c IPC client that sent request (or NULL if sent by CPG peer) * \param[in] op Requested API operation (for logging only) * * \return true if sender is peer or privileged client, otherwise false */ static inline bool is_privileged(const pcmk__client_t *c, const char *op) { if ((c == NULL) || pcmk__is_set(c->flags, pcmk__client_privileged)) { return true; } else { crm_warn("Rejecting IPC request '%s' from unprivileged client %s", pcmk__s(op, ""), pcmk__client_name(c)); return false; } } // CRM_OP_REGISTER static xmlNode * handle_register_request(pcmk__request_t *request) { xmlNode *reply = pcmk__xe_create(NULL, "reply"); pcmk__assert(request->ipc_client != NULL); pcmk__xe_set(reply, PCMK__XA_ST_OP, CRM_OP_REGISTER); pcmk__xe_set(reply, PCMK__XA_ST_CLIENTID, request->ipc_client->id); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_request_flags(request, pcmk__request_reuse_options); return reply; } // STONITH_OP_EXEC static xmlNode * handle_agent_request(pcmk__request_t *request) { execute_agent_action(request->xml, &request->result); if (request->result.execution_status == PCMK_EXEC_PENDING) { return NULL; } return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_TIMEOUT_UPDATE static xmlNode * handle_update_timeout_request(pcmk__request_t *request) { const char *call_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CALLID); const char *client_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CLIENTID); int op_timeout = 0; pcmk__xe_get_int(request->xml, PCMK__XA_ST_TIMEOUT, &op_timeout); do_stonith_async_timeout_update(client_id, call_id, op_timeout); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } // STONITH_OP_QUERY static xmlNode * handle_query_request(pcmk__request_t *request) { int timeout = 0; xmlNode *dev = NULL; const char *action = NULL; const char *target = NULL; const char *client_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CLIENTID); struct st_query_data *query = NULL; if (request->peer != NULL) { // Record it for the future notification create_remote_stonith_op(client_id, request->xml, TRUE); } /* Delete the DC node RELAY operation. 
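is_privileged() above admits a request when there is no IPC client (meaning a peer sent it) or when the client carries the privileged flag. A compact sketch of that gate, using invented flag and type names rather than pcmk__client_privileged and pcmk__client_t:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_CLIENT_PRIVILEGED  (UINT32_C(1) << 0)

typedef struct {
    const char *name;
    uint32_t flags;
} demo_client_t;

/* Peer requests (client == NULL) are always allowed; IPC clients need the flag */
static bool
request_allowed(const demo_client_t *client)
{
    return (client == NULL)
           || ((client->flags & DEMO_CLIENT_PRIVILEGED) != 0);
}

int
main(void)
{
    demo_client_t root_tool = { "root-tool", DEMO_CLIENT_PRIVILEGED };
    demo_client_t user_tool = { "user-tool", 0 };

    printf("peer request: %s\n", request_allowed(NULL)? "allowed" : "rejected");
    printf("%s: %s\n", root_tool.name,
           request_allowed(&root_tool)? "allowed" : "rejected");
    printf("%s: %s\n", user_tool.name,
           request_allowed(&user_tool)? "allowed" : "rejected");
    return 0;
}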
*/ remove_relay_op(request->xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); dev = pcmk__xpath_find_one(request->xml->doc, "//*[@" PCMK__XA_ST_DEVICE_ACTION "]", LOG_NEVER); if (dev != NULL) { const char *device = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (pcmk__str_eq(device, "manual_ack", pcmk__str_casei)) { return NULL; // No query or reply necessary } target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); action = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ACTION); } crm_log_xml_trace(request->xml, "Query"); query = pcmk__assert_alloc(1, sizeof(struct st_query_data)); query->reply = fenced_construct_reply(request->xml, NULL, &request->result); query->remote_peer = pcmk__str_copy(request->peer); query->client_id = pcmk__str_copy(client_id); query->target = pcmk__str_copy(target); query->action = pcmk__str_copy(action); query->call_options = request->call_options; pcmk__xe_get_int(request->xml, PCMK__XA_ST_TIMEOUT, &timeout); get_capable_devices(target, action, timeout, pcmk__is_set(query->call_options, st_opt_allow_self_fencing), query, stonith_query_capable_device_cb, fenced_df_none); return NULL; } // STONITH_OP_NOTIFY static xmlNode * handle_notify_request(pcmk__request_t *request) { const char *flag_name = NULL; pcmk__assert(request->ipc_client != NULL); flag_name = pcmk__xe_get(request->xml, PCMK__XA_ST_NOTIFY_ACTIVATE); if (flag_name != NULL) { crm_debug("Enabling %s callbacks for client %s", flag_name, pcmk__request_origin(request)); pcmk__set_client_flags(request->ipc_client, fenced_parse_notify_flag(flag_name)); } flag_name = pcmk__xe_get(request->xml, PCMK__XA_ST_NOTIFY_DEACTIVATE); if (flag_name != NULL) { crm_debug("Disabling %s callbacks for client %s", flag_name, pcmk__request_origin(request)); pcmk__clear_client_flags(request->ipc_client, fenced_parse_notify_flag(flag_name)); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); pcmk__set_request_flags(request, pcmk__request_reuse_options); return pcmk__ipc_create_ack(request->ipc_flags, PCMK__XE_ACK, NULL, CRM_EX_OK); } // STONITH_OP_RELAY static xmlNode * handle_relay_request(pcmk__request_t *request) { xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_TRACE); crm_notice("Received forwarded fencing request from " "%s %s to fence (%s) peer %s", pcmk__request_origin_type(request), pcmk__request_origin(request), pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ACTION), pcmk__xe_get(dev, PCMK__XA_ST_TARGET)); if (initiate_remote_stonith_op(NULL, request->xml, FALSE) == NULL) { set_bad_request_result(&request->result); return fenced_construct_reply(request->xml, NULL, &request->result); } pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); return NULL; } // STONITH_OP_FENCE static xmlNode * handle_fence_request(pcmk__request_t *request) { if (request->peer != NULL) { fence_locally(request->xml, &request->result); } else if (pcmk__is_set(request->call_options, st_opt_manual_ack)) { switch (fenced_handle_manual_confirmation(request->ipc_client, request->xml)) { case pcmk_rc_ok: pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); break; case EINPROGRESS: pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); break; default: set_bad_request_result(&request->result); break; } } else { const char *alternate_host = NULL; xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_TRACE); const char *target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); const char *action = pcmk__xe_get(dev, 
PCMK__XA_ST_DEVICE_ACTION); const char *device = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (request->ipc_client != NULL) { int tolerance = 0; crm_notice("Client %s wants to fence (%s) %s using %s", pcmk__request_origin(request), action, target, (device? device : "any device")); pcmk__xe_get_int(dev, PCMK__XA_ST_TOLERANCE, &tolerance); if (stonith_check_fence_tolerance(tolerance, target, action)) { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return fenced_construct_reply(request->xml, NULL, &request->result); } alternate_host = check_alternate_host(target); } else { crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'", request->peer, action, target, (device == NULL)? "(any)" : device); } if (alternate_host != NULL) { const char *client_id = NULL; remote_fencing_op_t *op = NULL; pcmk__node_status_t *node = pcmk__get_node(0, alternate_host, NULL, pcmk__node_search_cluster_member); if (request->ipc_client->id == 0) { client_id = pcmk__xe_get(request->xml, PCMK__XA_ST_CLIENTID); } else { client_id = request->ipc_client->id; } /* Create a duplicate fencing operation to relay with the client ID. * When a query response is received, this operation should be * deleted to avoid keeping the duplicate around. */ op = create_remote_stonith_op(client_id, request->xml, FALSE); pcmk__xe_set(request->xml, PCMK__XA_ST_OP, STONITH_OP_RELAY); pcmk__xe_set(request->xml, PCMK__XA_ST_CLIENTID, request->ipc_client->id); pcmk__xe_set(request->xml, PCMK__XA_ST_REMOTE_OP, op->id); // @TODO On failure, fail request immediately, or maybe panic pcmk__cluster_send_message(node, pcmk_ipc_fenced, request->xml); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } else if (initiate_remote_stonith_op(request->ipc_client, request->xml, FALSE) == NULL) { set_bad_request_result(&request->result); } else { pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_PENDING, NULL); } } if (request->result.execution_status == PCMK_EXEC_PENDING) { return NULL; } return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_FENCE_HISTORY static xmlNode * handle_history_request(pcmk__request_t *request) { xmlNode *reply = NULL; xmlNode *data = NULL; stonith_fence_history(request->xml, &data, request->peer, request->call_options); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (!pcmk__is_set(request->call_options, st_opt_discard_reply)) { /* When the local node broadcasts its history, it sets * st_opt_discard_reply and doesn't need a reply. */ reply = fenced_construct_reply(request->xml, data, &request->result); } pcmk__xml_free(data); return reply; } // STONITH_OP_DEVICE_ADD static xmlNode * handle_device_add_request(pcmk__request_t *request) { const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//" PCMK__XE_ST_DEVICE_ID, LOG_ERR); if (is_privileged(request->ipc_client, op)) { int rc = fenced_device_register(dev, false); rc = pcmk_rc2legacy(rc); pcmk__set_result(&request->result, ((rc == pcmk_ok)? CRM_EX_OK : CRM_EX_ERROR), stonith__legacy2status(rc), ((rc == pcmk_ok)? NULL : pcmk_strerror(rc))); } else { pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must register device via CIB"); } fenced_send_config_notification(op, &request->result, (dev == NULL)? 
NULL : pcmk__xe_id(dev)); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_DEVICE_DEL static xmlNode * handle_device_delete_request(pcmk__request_t *request) { xmlNode *dev = pcmk__xpath_find_one(request->xml->doc, "//" PCMK__XE_ST_DEVICE_ID, LOG_ERR); const char *device_id = pcmk__xe_get(dev, PCMK_XA_ID); const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { stonith_device_remove(device_id, false); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must delete device via CIB"); } fenced_send_config_notification(op, &request->result, device_id); return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_LEVEL_ADD static xmlNode * handle_level_add_request(pcmk__request_t *request) { const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { fenced_register_level(request->xml, &request->result); } else { unpack_level_request(request->xml, NULL, NULL, NULL); pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must add level via CIB"); } return fenced_construct_reply(request->xml, NULL, &request->result); } // STONITH_OP_LEVEL_DEL static xmlNode * handle_level_delete_request(pcmk__request_t *request) { const char *op = pcmk__xe_get(request->xml, PCMK__XA_ST_OP); if (is_privileged(request->ipc_client, op)) { fenced_unregister_level(request->xml, &request->result); } else { unpack_level_request(request->xml, NULL, NULL, NULL); pcmk__set_result(&request->result, CRM_EX_INSUFFICIENT_PRIV, PCMK_EXEC_INVALID, "Unprivileged users must delete level via CIB"); } return fenced_construct_reply(request->xml, NULL, &request->result); } // CRM_OP_RM_NODE_CACHE static xmlNode * handle_cache_request(pcmk__request_t *request) { int node_id = 0; const char *name = NULL; pcmk__xe_get_int(request->xml, PCMK_XA_ID, &node_id); name = pcmk__xe_get(request->xml, PCMK_XA_UNAME); pcmk__cluster_forget_cluster_node(node_id, name); pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); return NULL; } static xmlNode * handle_unknown_request(pcmk__request_t *request) { crm_err("Unknown IPC request %s from %s %s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request)); pcmk__format_result(&request->result, CRM_EX_PROTOCOL, PCMK_EXEC_INVALID, "Unknown IPC request type '%s' (bug?)", pcmk__s(request->op, "")); return fenced_construct_reply(request->xml, NULL, &request->result); } static void fenced_register_handlers(void) { pcmk__server_command_t handlers[] = { { CRM_OP_REGISTER, handle_register_request }, { STONITH_OP_EXEC, handle_agent_request }, { STONITH_OP_TIMEOUT_UPDATE, handle_update_timeout_request }, { STONITH_OP_QUERY, handle_query_request }, { STONITH_OP_NOTIFY, handle_notify_request }, { STONITH_OP_RELAY, handle_relay_request }, { STONITH_OP_FENCE, handle_fence_request }, { STONITH_OP_FENCE_HISTORY, handle_history_request }, { STONITH_OP_DEVICE_ADD, handle_device_add_request }, { STONITH_OP_DEVICE_DEL, handle_device_delete_request }, { STONITH_OP_LEVEL_ADD, handle_level_add_request }, { STONITH_OP_LEVEL_DEL, handle_level_delete_request }, { CRM_OP_RM_NODE_CACHE, handle_cache_request }, { NULL, handle_unknown_request }, }; fenced_handlers = pcmk__register_handlers(handlers); } void fenced_unregister_handlers(void) { if (fenced_handlers != NULL) { 
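fenced_register_handlers() above hands an array of {operation name, handler} pairs to pcmk__register_handlers(), with the NULL-named final entry serving as the fallback for unknown operations. A self-contained GLib sketch of that dispatch-table pattern (all names here are invented, and the real pcmk__register_handlers() surely differs in detail):

#include <glib.h>
#include <stdio.h>

typedef const char *(*demo_handler_t)(const char *request);

typedef struct {
    const char *op;        // NULL marks the final, fallback entry
    demo_handler_t handler;
} demo_command_t;

static const char *handle_ping(const char *request) { return "pong"; }
static const char *handle_unknown(const char *request) { return "unknown op"; }

/* Map op names to table entries; the NULL-named entry becomes the fallback */
static GHashTable *
register_handlers(demo_command_t *commands, demo_command_t **fallback)
{
    GHashTable *table = g_hash_table_new(g_str_hash, g_str_equal);

    for (int i = 0; ; i++) {
        if (commands[i].op == NULL) {
            *fallback = &commands[i];
            break;
        }
        g_hash_table_insert(table, (gpointer) commands[i].op, &commands[i]);
    }
    return table;
}

int
main(void)
{
    demo_command_t commands[] = {
        { "ping", handle_ping },
        { NULL, handle_unknown },
    };
    demo_command_t *fallback = NULL;
    GHashTable *table = register_handlers(commands, &fallback);
    demo_command_t *match = g_hash_table_lookup(table, "ping");

    if (match == NULL) {
        match = fallback;
    }
    printf("ping -> %s\n", match->handler("ping"));

    g_hash_table_destroy(table);
    return 0;
}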
g_hash_table_destroy(fenced_handlers); fenced_handlers = NULL; } } static void handle_request(pcmk__request_t *request) { xmlNode *reply = NULL; const char *reason = NULL; if (fenced_handlers == NULL) { fenced_register_handlers(); } reply = pcmk__process_request(request, fenced_handlers); if (reply != NULL) { if (pcmk__is_set(request->flags, pcmk__request_reuse_options) && (request->ipc_client != NULL)) { /* Certain IPC-only commands must reuse the call options from the * original request rather than the ones set by stonith_send_reply() * -> do_local_reply(). */ pcmk__ipc_send_xml(request->ipc_client, request->ipc_id, reply, request->ipc_flags); request->ipc_client->request_id = 0; } else { stonith_send_reply(reply, request->call_options, request->peer, request->ipc_client); } pcmk__xml_free(reply); } reason = request->result.exit_reason; crm_debug("Processed %s request from %s %s: %s%s%s%s", request->op, pcmk__request_origin_type(request), pcmk__request_origin(request), pcmk_exec_status_str(request->result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); } static void handle_reply(pcmk__client_t *client, xmlNode *request, const char *remote_peer) { // Copy, because request might be freed before we want to log this char *op = pcmk__xe_get_copy(request, PCMK__XA_ST_OP); if (pcmk__str_eq(op, STONITH_OP_QUERY, pcmk__str_none)) { process_remote_stonith_query(request); } else if (pcmk__str_any_of(op, STONITH_OP_NOTIFY, STONITH_OP_FENCE, NULL)) { fenced_process_fencing_reply(request); } else { crm_err("Ignoring unknown %s reply from %s %s", pcmk__s(op, "untyped"), ((client == NULL)? "peer" : "client"), ((client == NULL)? remote_peer : pcmk__client_name(client))); crm_log_xml_warn(request, "UnknownOp"); free(op); return; } crm_debug("Processed %s reply from %s %s", op, ((client == NULL)? "peer" : "client"), ((client == NULL)? remote_peer : pcmk__client_name(client))); free(op); } /*! * \internal * \brief Handle a message from an IPC client or CPG peer * * \param[in,out] client If not NULL, IPC client that sent message * \param[in] id If from IPC client, IPC message ID * \param[in] flags Message flags * \param[in,out] message Message XML * \param[in] remote_peer If not NULL, CPG peer that sent message */ void stonith_command(pcmk__client_t *client, uint32_t id, uint32_t flags, xmlNode *message, const char *remote_peer) { uint32_t call_options = st_opt_none; int rc = pcmk_rc_ok; bool is_reply = false; CRM_CHECK(message != NULL, return); if (pcmk__xpath_find_one(message->doc, "//" PCMK__XE_ST_REPLY, LOG_NEVER) != NULL) { is_reply = true; } rc = pcmk__xe_get_flags(message, PCMK__XA_ST_CALLOPT, &call_options, st_opt_none); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from message: %s", pcmk_rc_str(rc)); } crm_debug("Processing %ssynchronous %s %s %u from %s %s", pcmk__is_set(call_options, st_opt_sync_call)? "" : "a", pcmk__xe_get(message, PCMK__XA_ST_OP), (is_reply? "reply" : "request"), id, ((client == NULL)? "peer" : "client"), ((client == NULL)? 
remote_peer : pcmk__client_name(client))); if (pcmk__is_set(call_options, st_opt_sync_call)) { pcmk__assert((client == NULL) || (client->request_id == id)); } if (is_reply) { handle_reply(client, message, remote_peer); } else { pcmk__request_t request = { .ipc_client = client, .ipc_id = id, .ipc_flags = flags, .peer = remote_peer, .xml = message, .call_options = call_options, .result = PCMK__UNKNOWN_RESULT, }; request.op = pcmk__xe_get_copy(request.xml, PCMK__XA_ST_OP); CRM_CHECK(request.op != NULL, return); if (pcmk__is_set(request.call_options, st_opt_sync_call)) { pcmk__set_request_flags(&request, pcmk__request_sync); } handle_request(&request); pcmk__reset_request(&request); } } diff --git a/daemons/fenced/fenced_history.c b/daemons/fenced/fenced_history.c index b1b1fb6203..54e45fdf5d 100644 --- a/daemons/fenced/fenced_history.c +++ b/daemons/fenced/fenced_history.c @@ -1,574 +1,574 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include // xmlNode #include #include #include #include #include #include #include #include #include #define MAX_STONITH_HISTORY 500 /*! * \internal * \brief Send a broadcast to all nodes to trigger cleanup or * history synchronisation * * \param[in] history Optional history to be attached * \param[in] callopts We control cleanup via a flag in the callopts * \param[in] target Cleanup can be limited to certain fence-targets */ static void stonith_send_broadcast_history(xmlNode *history, int callopts, const char *target) { xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_STONITH_COMMAND); xmlNode *wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA); xmlNode *call_data = pcmk__xe_create(wrapper, __func__); pcmk__xe_set(bcast, PCMK__XA_T, PCMK__VALUE_STONITH_NG); pcmk__xe_set(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); pcmk__xe_set(bcast, PCMK__XA_ST_OP, STONITH_OP_FENCE_HISTORY); pcmk__xe_set_int(bcast, PCMK__XA_ST_CALLOPT, callopts); pcmk__xml_copy(call_data, history); if (target != NULL) { pcmk__xe_set(call_data, PCMK__XA_ST_TARGET, target); } pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, bcast); pcmk__xml_free(bcast); } static gboolean stonith_remove_history_entry (gpointer key, gpointer value, gpointer user_data) { remote_fencing_op_t *op = value; const char *target = (const char *) user_data; if ((op->state == st_failed) || (op->state == st_done)) { if ((target) && (strcmp(op->target, target) != 0)) { return FALSE; } return TRUE; } return FALSE; /* don't clean pending operations */ } /*! 
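stonith_remove_history_entry() above is used with g_hash_table_foreach_remove(): completed (done or failed) operations are dropped, optionally only for one target, while pending operations always survive. A runnable GLib sketch of that predicate-driven cleanup, with a simplified record type in place of remote_fencing_op_t:

#include <glib.h>
#include <stdio.h>
#include <string.h>

typedef enum { DEMO_PENDING, DEMO_DONE, DEMO_FAILED } demo_state_t;

typedef struct {
    const char *target;
    demo_state_t state;
} demo_op_t;

/* TRUE means "remove this entry"; pending operations are never cleaned up */
static gboolean
remove_completed(gpointer key, gpointer value, gpointer user_data)
{
    demo_op_t *op = value;
    const char *target = user_data;   // NULL means "any target"

    if ((op->state != DEMO_DONE) && (op->state != DEMO_FAILED)) {
        return FALSE;
    }
    return (target == NULL) || (strcmp(op->target, target) == 0);
}

int
main(void)
{
    GHashTable *history = g_hash_table_new(g_str_hash, g_str_equal);
    demo_op_t done = { "node1", DEMO_DONE };
    demo_op_t pending = { "node1", DEMO_PENDING };

    g_hash_table_insert(history, "op-1", &done);
    g_hash_table_insert(history, "op-2", &pending);

    g_hash_table_foreach_remove(history, remove_completed, "node1");
    printf("%u entry(ies) left (the pending one)\n", g_hash_table_size(history));

    g_hash_table_destroy(history);
    return 0;
}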
* \internal * \brief Send out a cleanup broadcast or do a local history-cleanup * * \param[in] target Cleanup can be limited to certain fence-targets * \param[in] broadcast Send out a cleanup broadcast */ static void stonith_fence_history_cleanup(const char *target, gboolean broadcast) { if (broadcast) { stonith_send_broadcast_history(NULL, st_opt_cleanup | st_opt_discard_reply, target); /* we'll do the local clean when we receive back our own broadcast */ } else if (stonith_remote_op_list) { g_hash_table_foreach_remove(stonith_remote_op_list, stonith_remove_history_entry, (gpointer) target); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } } /* keeping the length of fence-history within bounds * ================================================= * * If things are really running wild a lot of fencing-attempts * might fill up the hash-map, eventually using up a lot * of memory and creating huge history-sync messages. * Before the history being synced across nodes at least * the reboot of a cluster-node helped keeping the * history within bounds even though not in a reliable * manner. * * stonith_remote_op_list isn't sorted for time-stamps * thus it would be kind of expensive to delete e.g. * the oldest entry if it would grow past MAX_STONITH_HISTORY * entries. * It is more efficient to purge MAX_STONITH_HISTORY/2 * entries whenever the list grows beyond MAX_STONITH_HISTORY. * (sort for age + purge the MAX_STONITH_HISTORY/2 oldest) * That done on a per-node-base might raise the * probability of large syncs to occur. * Things like introducing a broadcast to purge * MAX_STONITH_HISTORY/2 entries or not sync above a certain * threshold coming to mind ... * Simplest thing though is to purge the full history * throughout the cluster once MAX_STONITH_HISTORY is reached. * On the other hand this leads to purging the history in * situations where it would be handy to have it probably. */ /*! * \internal * \brief Compare two remote fencing operations by status and completion time * * A pending operation is ordered before a completed operation. If both * operations have completed, then the more recently completed operation is * ordered first. Two pending operations are considered equal. * * \param[in] a First \c remote_fencing_op_t to compare * \param[in] b Second \c remote_fencing_op_t to compare * * \return Standard comparison result (a negative integer if \p a is lesser, * 0 if the values are equal, and a positive integer if \p a is greater) */ static gint cmp_op_by_completion(gconstpointer a, gconstpointer b) { const remote_fencing_op_t *op1 = a; const remote_fencing_op_t *op2 = b; bool op1_pending = stonith__op_state_pending(op1->state); bool op2_pending = stonith__op_state_pending(op2->state); if (op1_pending && op2_pending) { return 0; } if (op1_pending) { return -1; } if (op2_pending) { return 1; } if (op1->completed > op2->completed) { return -1; } if (op1->completed < op2->completed) { return 1; } if (op1->completed_nsec > op2->completed_nsec) { return -1; } if (op1->completed_nsec < op2->completed_nsec) { return 1; } return 0; } /*! * \internal * \brief Remove a completed operation from \c stonith_remote_op_list * * \param[in] data \c remote_fencing_op_t to remove * \param[in] user_data Ignored */ static void remove_completed_remote_op(gpointer data, gpointer user_data) { const remote_fencing_op_t *op = data; if (!stonith__op_state_pending(op->state)) { g_hash_table_remove(stonith_remote_op_list, op->id); } } /*! 
* \internal * \brief Do a local history-trim to MAX_STONITH_HISTORY / 2 entries * once over MAX_STONITH_HISTORY */ void stonith_fence_history_trim(void) { if (stonith_remote_op_list == NULL) { return; } if (g_hash_table_size(stonith_remote_op_list) > MAX_STONITH_HISTORY) { GList *ops = g_hash_table_get_values(stonith_remote_op_list); crm_trace("More than %d entries in fencing history, purging oldest " "completed operations", MAX_STONITH_HISTORY); ops = g_list_sort(ops, cmp_op_by_completion); // Always keep pending ops regardless of number of entries g_list_foreach(g_list_nth(ops, MAX_STONITH_HISTORY / 2), remove_completed_remote_op, NULL); // No need for a notification after purging old data g_list_free(ops); } } /*! * \internal * \brief Convert xml fence-history to a hash-table like stonith_remote_op_list * * \param[in] history Fence-history in xml * * \return Fence-history as hash-table */ static GHashTable * stonith_xml_history_to_list(const xmlNode *history) { xmlNode *xml_op = NULL; GHashTable *rv = NULL; init_stonith_remote_op_hash_table(&rv); CRM_LOG_ASSERT(rv != NULL); for (xml_op = pcmk__xe_first_child(history, NULL, NULL, NULL); xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) { remote_fencing_op_t *op = NULL; char *id = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_REMOTE_OP); int state; int exit_status = CRM_EX_OK; int execution_status = PCMK_EXEC_DONE; if (!id) { crm_warn("Malformed fencing history received from peer"); continue; } crm_trace("Attaching op %s to hashtable", id); op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t)); op->id = id; op->target = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_TARGET); op->action = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_DEVICE_ACTION); op->originator = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_ORIGIN); op->delegate = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_DELEGATE); op->client_name = pcmk__xe_get_copy(xml_op, PCMK__XA_ST_CLIENTNAME); pcmk__xe_get_time(xml_op, PCMK__XA_ST_DATE, &op->completed); pcmk__xe_get_ll(xml_op, PCMK__XA_ST_DATE_NSEC, &op->completed_nsec); pcmk__xe_get_int(xml_op, PCMK__XA_ST_STATE, &state); op->state = (enum op_state) state; /* @COMPAT We can't use stonith__xe_get_result() here because * fencers <2.1.3 didn't include results, leading it to assume an error * status. Instead, set an unknown status in that case. */ if ((pcmk__xe_get_int(xml_op, PCMK__XA_RC_CODE, &exit_status) != pcmk_rc_ok) || (pcmk__xe_get_int(xml_op, PCMK__XA_OP_STATUS, &execution_status) != pcmk_rc_ok)) { exit_status = CRM_EX_INDETERMINATE; execution_status = PCMK_EXEC_UNKNOWN; } pcmk__set_result(&op->result, exit_status, execution_status, pcmk__xe_get(xml_op, PCMK_XA_EXIT_REASON)); pcmk__set_result_output(&op->result, pcmk__xe_get_copy(xml_op, PCMK__XA_ST_OUTPUT), NULL); g_hash_table_replace(rv, id, op); CRM_LOG_ASSERT(g_hash_table_lookup(rv, id) != NULL); } return rv; } /*! 
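stonith_fence_history_trim() above leans on cmp_op_by_completion() to sort pending entries first and newer completions before older ones, then removes everything past the halfway mark from the hash table. A reduced, self-contained sketch of that sort-and-trim approach (smaller record, lower limit, invented names) might be:

#include <glib.h>
#include <stdio.h>
#include <time.h>

#define DEMO_MAX_HISTORY 4

typedef struct {
    const char *id;
    gboolean pending;
    time_t completed;
} demo_op_t;

/* Pending first; among completed entries, newest first */
static gint
cmp_by_completion(gconstpointer a, gconstpointer b)
{
    const demo_op_t *op1 = a;
    const demo_op_t *op2 = b;

    if (op1->pending && op2->pending) {
        return 0;
    }
    if (op1->pending) {
        return -1;
    }
    if (op2->pending) {
        return 1;
    }
    if (op1->completed > op2->completed) {
        return -1;
    }
    if (op1->completed < op2->completed) {
        return 1;
    }
    return 0;
}

static void
trim_history(GHashTable *history)
{
    if (g_hash_table_size(history) > DEMO_MAX_HISTORY) {
        GList *ops = g_list_sort(g_hash_table_get_values(history),
                                 cmp_by_completion);

        /* Drop everything past the halfway mark; pending entries sort to the
         * front and are kept regardless
         */
        for (GList *iter = g_list_nth(ops, DEMO_MAX_HISTORY / 2);
             iter != NULL; iter = iter->next) {
            demo_op_t *op = iter->data;

            if (!op->pending) {
                g_hash_table_remove(history, op->id);
            }
        }
        g_list_free(ops);
    }
}

int
main(void)
{
    GHashTable *history = g_hash_table_new(g_str_hash, g_str_equal);
    demo_op_t ops[] = {
        { "op-1", FALSE, 100 }, { "op-2", FALSE, 200 }, { "op-3", FALSE, 300 },
        { "op-4", TRUE, 0 },    { "op-5", FALSE, 400 },
    };

    for (int i = 0; i < 5; i++) {
        g_hash_table_insert(history, (gpointer) ops[i].id, &ops[i]);
    }
    trim_history(history);
    printf("%u entries remain after trimming\n", g_hash_table_size(history));

    g_hash_table_destroy(history);
    return 0;
}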
* \internal * \brief Craft xml difference between local fence-history and a history * coming from remote, and merge the remote history into the local * * \param[in,out] remote_history Fence-history as hash-table (may be NULL) * \param[in] add_id If crafting the answer for an API * history-request there is no need for the id * \param[in] target Optionally limit to certain fence-target * * \return The fence-history as xml */ static xmlNode * stonith_local_history_diff_and_merge(GHashTable *remote_history, gboolean add_id, const char *target) { xmlNode *history = NULL; GHashTableIter iter; remote_fencing_op_t *op = NULL; gboolean updated = FALSE; int cnt = 0; if (stonith_remote_op_list) { char *id = NULL; history = pcmk__xe_create(NULL, PCMK__XE_ST_HISTORY); g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, (void **)&id, (void **)&op)) { xmlNode *entry = NULL; if (remote_history) { remote_fencing_op_t *remote_op = g_hash_table_lookup(remote_history, op->id); if (remote_op) { if (stonith__op_state_pending(op->state) && !stonith__op_state_pending(remote_op->state)) { crm_debug("Updating outdated pending operation %.8s " "(state=%s) according to the one (state=%s) from " "remote peer history", op->id, stonith__op_state_text(op->state), stonith__op_state_text(remote_op->state)); g_hash_table_steal(remote_history, op->id); op->id = remote_op->id; remote_op->id = id; g_hash_table_iter_replace(&iter, remote_op); updated = TRUE; continue; /* skip outdated entries */ } else if (!stonith__op_state_pending(op->state) && stonith__op_state_pending(remote_op->state)) { crm_debug("Broadcasting operation %.8s (state=%s) to " "update the outdated pending one " "(state=%s) in remote peer history", op->id, stonith__op_state_text(op->state), stonith__op_state_text(remote_op->state)); g_hash_table_remove(remote_history, op->id); } else { g_hash_table_remove(remote_history, op->id); continue; /* skip entries broadcasted already */ } } } if (!pcmk__str_eq(target, op->target, pcmk__str_null_matches)) { continue; } cnt++; crm_trace("Attaching op %s", op->id); entry = pcmk__xe_create(history, STONITH_OP_EXEC); if (add_id) { pcmk__xe_set(entry, PCMK__XA_ST_REMOTE_OP, op->id); } pcmk__xe_set(entry, PCMK__XA_ST_TARGET, op->target); pcmk__xe_set(entry, PCMK__XA_ST_DEVICE_ACTION, op->action); pcmk__xe_set(entry, PCMK__XA_ST_ORIGIN, op->originator); pcmk__xe_set(entry, PCMK__XA_ST_DELEGATE, op->delegate); pcmk__xe_set(entry, PCMK__XA_ST_CLIENTNAME, op->client_name); pcmk__xe_set_time(entry, PCMK__XA_ST_DATE, op->completed); pcmk__xe_set_ll(entry, PCMK__XA_ST_DATE_NSEC, op->completed_nsec); pcmk__xe_set_int(entry, PCMK__XA_ST_STATE, op->state); stonith__xe_set_result(entry, &op->result); } } if (remote_history) { init_stonith_remote_op_hash_table(&stonith_remote_op_list); updated |= g_hash_table_size(remote_history); g_hash_table_iter_init(&iter, remote_history); while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) { if (stonith__op_state_pending(op->state) && pcmk__str_eq(op->originator, fenced_get_local_node(), pcmk__str_casei)) { crm_warn("Failing pending operation %.8s originated by us but " "known only from peer history", op->id); op->state = st_failed; set_fencing_completed(op); /* CRM_EX_EXPIRED + PCMK_EXEC_INVALID prevents finalize_op() * from setting a delegate */ pcmk__set_result(&op->result, CRM_EX_EXPIRED, PCMK_EXEC_INVALID, "Initiated by earlier fencer " "process and presumed failed"); fenced_broadcast_op_result(op, false); } g_hash_table_iter_steal(&iter); 
g_hash_table_replace(stonith_remote_op_list, op->id, op); /* we could trim the history here but if we bail * out after trim we might miss more recent entries * of those that might still be in the list * if we don't bail out trimming once is more * efficient and memory overhead is minimal as * we are just moving pointers from one hash to * another */ } g_hash_table_destroy(remote_history); /* remove what is left */ } if (updated) { stonith_fence_history_trim(); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } if (cnt == 0) { pcmk__xml_free(history); return NULL; } else { return history; } } /*! * \internal * \brief Craft xml from the local fence-history * * \param[in] add_id If crafting the answer for an API * history-request there is no need for the id * \param[in] target Optionally limit to certain fence-target * * \return The fence-history as xml */ static xmlNode * stonith_local_history(gboolean add_id, const char *target) { return stonith_local_history_diff_and_merge(NULL, add_id, target); } /*! * \internal * \brief Handle fence-history messages (from API or coming in as broadcasts) * * \param[in,out] msg Request XML * \param[out] output Where to set local history, if requested * \param[in] remote_peer If broadcast, peer that sent it * \param[in] options Call options from the request */ void stonith_fence_history(xmlNode *msg, xmlNode **output, const char *remote_peer, int options) { const char *target = NULL; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_NEVER); xmlNode *out_history = NULL; if (dev) { target = pcmk__xe_get(dev, PCMK__XA_ST_TARGET); if (target && (options & st_opt_cs_nodeid)) { int nodeid; pcmk__node_status_t *node = NULL; pcmk__scan_min_int(target, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); if (node != NULL) { target = node->name; } } } if (options & st_opt_cleanup) { const char *call_id = pcmk__xe_get(msg, PCMK__XA_ST_CALLID); crm_trace("Cleaning up operations on %s in %p", target, stonith_remote_op_list); stonith_fence_history_cleanup(target, (call_id != NULL)); } else if (options & st_opt_broadcast) { /* there is no clear sign atm for when a history sync is done so send a notification for anything that smells like history-sync */ fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY_SYNCED, NULL, NULL); if (pcmk__xe_get(msg, PCMK__XA_ST_CALLID) != NULL) { /* this is coming from the stonith-API * * craft a broadcast with node's history * so that every node can merge and broadcast * what it has on top */ out_history = stonith_local_history(TRUE, NULL); crm_trace("Broadcasting history to peers"); stonith_send_broadcast_history(out_history, st_opt_broadcast | st_opt_discard_reply, NULL); } else if (remote_peer && !pcmk__str_eq(remote_peer, fenced_get_local_node(), pcmk__str_casei)) { xmlNode *history = pcmk__xpath_find_one(msg->doc, "//" PCMK__XE_ST_HISTORY, LOG_NEVER); /* either a broadcast created directly upon stonith-API request * or a diff as response to such a thing * * in both cases it may have a history or not * if we have differential data * merge in what we've received and stop * otherwise broadcast what we have on top * marking as differential and merge in afterwards */ if (!history || !pcmk__xe_attr_is_true(history, PCMK__XA_ST_DIFFERENTIAL)) { GHashTable *received_history = NULL; if (history != NULL) { received_history = stonith_xml_history_to_list(history); } out_history = 
stonith_local_history_diff_and_merge(received_history, TRUE, NULL); if (out_history) { crm_trace("Broadcasting history-diff to peers"); - pcmk__xe_set_bool_attr(out_history, - PCMK__XA_ST_DIFFERENTIAL, true); + pcmk__xe_set_bool(out_history, PCMK__XA_ST_DIFFERENTIAL, + true); stonith_send_broadcast_history(out_history, st_opt_broadcast | st_opt_discard_reply, NULL); } else { crm_trace("History-diff is empty - skip broadcast"); } } } else { crm_trace("Skipping history-query-broadcast (%s%s)" " we sent ourselves", remote_peer?"remote-peer=":"local-ipc", remote_peer?remote_peer:""); } } else { /* plain history request */ crm_trace("Looking for operations on %s in %p", target, stonith_remote_op_list); *output = stonith_local_history(FALSE, target); } pcmk__xml_free(out_history); } diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c index e59b1faa8e..43f2b9c558 100644 --- a/daemons/fenced/fenced_remote.c +++ b/daemons/fenced/fenced_remote.c @@ -1,2630 +1,2630 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include // xmlNode #include #include #include #include #include #include #include #include #include #include #define TIMEOUT_MULTIPLY_FACTOR 1.2 /* When one fencer queries its peers for devices able to handle a fencing * request, each peer will reply with a list of such devices available to it. * Each reply will be parsed into a peer_device_info_t, with each device's * information kept in a device_properties_t. 
*/ typedef struct device_properties_s { /* Whether access to this device has been verified */ gboolean verified; /* The remaining members are indexed by the operation's "phase" */ /* Whether this device has been executed in each phase */ gboolean executed[st_phase_max]; /* Whether this device is disallowed from executing in each phase */ gboolean disallowed[st_phase_max]; /* Action-specific timeout for each phase */ int custom_action_timeout[st_phase_max]; /* Action-specific maximum random delay for each phase */ int delay_max[st_phase_max]; /* Action-specific base delay for each phase */ int delay_base[st_phase_max]; /* Group of enum st_device_flags */ uint32_t device_support_flags; } device_properties_t; typedef struct { /* Name of peer that sent this result */ char *host; /* Only try peers for non-topology based operations once */ gboolean tried; /* Number of entries in the devices table */ int ndevices; /* Devices available to this host that are capable of fencing the target */ GHashTable *devices; } peer_device_info_t; GHashTable *stonith_remote_op_list = NULL; extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); static void request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer); static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup); static void report_timeout_period(remote_fencing_op_t * op, int op_timeout); static int get_op_total_timeout(const remote_fencing_op_t *op, const peer_device_info_t *chosen_peer); static gint sort_strings(gconstpointer a, gconstpointer b) { return strcmp(a, b); } static void free_remote_query(gpointer data) { if (data != NULL) { peer_device_info_t *peer = data; g_hash_table_destroy(peer->devices); free(peer->host); free(peer); } } void free_stonith_remote_op_list(void) { if (stonith_remote_op_list != NULL) { g_hash_table_destroy(stonith_remote_op_list); stonith_remote_op_list = NULL; } } struct peer_count_data { const remote_fencing_op_t *op; gboolean verified_only; uint32_t support_action_only; int count; }; /*! * \internal * \brief Increment a counter if a device has not been executed yet * * \param[in] key Device ID (ignored) * \param[in] value Device properties * \param[in,out] user_data Peer count data */ static void count_peer_device(gpointer key, gpointer value, gpointer user_data) { device_properties_t *props = (device_properties_t*)value; struct peer_count_data *data = user_data; if (!props->executed[data->op->phase] && (!data->verified_only || props->verified) && ((data->support_action_only == fenced_df_none) || pcmk__is_set(props->device_support_flags, data->support_action_only))) { ++(data->count); } } /*! * \internal * \brief Check the number of available devices in a peer's query results * * \param[in] op Operation that results are for * \param[in] peer Peer to count * \param[in] verified_only Whether to count only verified devices * \param[in] support_action_only Whether to count only devices that support action * * \return Number of devices available to peer that were not already executed */ static int count_peer_devices(const remote_fencing_op_t *op, const peer_device_info_t *peer, gboolean verified_only, uint32_t support_on_action_only) { struct peer_count_data data; data.op = op; data.verified_only = verified_only; data.support_action_only = support_on_action_only; data.count = 0; if (peer) { g_hash_table_foreach(peer->devices, count_peer_device, &data); } return data.count; } /*! 
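count_peer_device() and count_peer_devices() above accumulate a count by handing a small struct to g_hash_table_foreach() as user_data, so the per-device callback can apply several filters and bump a single counter. The general idiom, stripped of the fencer-specific filters and types:

#include <glib.h>
#include <stdio.h>

typedef struct {
    gboolean verified;
    gboolean executed;
} demo_device_t;

struct count_data {
    gboolean verified_only;
    int count;
};

/* Count devices that have not run yet and, if requested, are verified */
static void
count_device(gpointer key, gpointer value, gpointer user_data)
{
    demo_device_t *dev = value;
    struct count_data *data = user_data;

    if (!dev->executed && (!data->verified_only || dev->verified)) {
        data->count++;
    }
}

int
main(void)
{
    GHashTable *devices = g_hash_table_new(g_str_hash, g_str_equal);
    demo_device_t ipmi = { TRUE, FALSE };
    demo_device_t pdu = { FALSE, FALSE };
    struct count_data data = { TRUE, 0 };

    g_hash_table_insert(devices, "ipmi1", &ipmi);
    g_hash_table_insert(devices, "pdu1", &pdu);

    g_hash_table_foreach(devices, count_device, &data);
    printf("%d verified, not-yet-executed device(s)\n", data.count);

    g_hash_table_destroy(devices);
    return 0;
}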
* \internal * \brief Search for a device in a query result * * \param[in] op Operation that result is for * \param[in] peer Query result for a peer * \param[in] device Device ID to search for * * \return Device properties if found, NULL otherwise */ static device_properties_t * find_peer_device(const remote_fencing_op_t *op, const peer_device_info_t *peer, const char *device, uint32_t support_action_only) { device_properties_t *props = g_hash_table_lookup(peer->devices, device); if (props == NULL) { return NULL; } if ((support_action_only != fenced_df_none) && !pcmk__is_set(props->device_support_flags, support_action_only)) { return NULL; } if (props->executed[op->phase] || props->disallowed[op->phase]) { return NULL; } return props; } /*! * \internal * \brief Find a device in a peer's device list and mark it as executed * * \param[in] op Operation that peer result is for * \param[in,out] peer Peer with results to search * \param[in] device ID of device to mark as done * \param[in] verified_devices_only Only consider verified devices * * \return TRUE if device was found and marked, FALSE otherwise */ static gboolean grab_peer_device(const remote_fencing_op_t *op, peer_device_info_t *peer, const char *device, gboolean verified_devices_only) { device_properties_t *props = find_peer_device(op, peer, device, fenced_support_flag(op->action)); if ((props == NULL) || (verified_devices_only && !props->verified)) { return FALSE; } crm_trace("Removing %s from %s (%d remaining)", device, peer->host, count_peer_devices(op, peer, FALSE, fenced_df_none)); props->executed[op->phase] = TRUE; return TRUE; } static void clear_remote_op_timers(remote_fencing_op_t * op) { if (op->query_timer) { g_source_remove(op->query_timer); op->query_timer = 0; } if (op->op_timer_total) { g_source_remove(op->op_timer_total); op->op_timer_total = 0; } if (op->op_timer_one) { g_source_remove(op->op_timer_one); op->op_timer_one = 0; } } static void free_remote_op(gpointer data) { remote_fencing_op_t *op = data; crm_log_xml_debug(op->request, "Destroying"); clear_remote_op_timers(op); free(op->id); free(op->action); free(op->delegate); free(op->target); free(op->client_id); free(op->client_name); free(op->originator); if (op->query_results) { g_list_free_full(op->query_results, free_remote_query); } if (op->request) { pcmk__xml_free(op->request); op->request = NULL; } if (op->devices_list) { g_list_free_full(op->devices_list, free); op->devices_list = NULL; } g_list_free_full(op->automatic_list, free); g_list_free(op->duplicates); pcmk__reset_result(&op->result); free(op); } void init_stonith_remote_op_hash_table(GHashTable **table) { if (*table == NULL) { *table = pcmk__strkey_table(NULL, free_remote_op); } } /*! * \internal * \brief Return an operation's originally requested action (before any remap) * * \param[in] op Operation to check * * \return Operation's original action */ static const char * op_requested_action(const remote_fencing_op_t *op) { return ((op->phase > st_phase_requested)? PCMK_ACTION_REBOOT : op->action); } /*! * \internal * \brief Remap a "reboot" operation to the "off" phase * * \param[in,out] op Operation to remap */ static void op_phase_off(remote_fencing_op_t *op) { crm_info("Remapping multiple-device reboot targeting %s to 'off' " QB_XS " id=%.8s", op->target, op->id); op->phase = st_phase_off; /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the * memory allocation at each phase. */ strcpy(op->action, PCMK_ACTION_OFF); } /*! 
* \internal * \brief Advance a remapped reboot operation to the "on" phase * * \param[in,out] op Operation to remap */ static void op_phase_on(remote_fencing_op_t *op) { GList *iter = NULL; crm_info("Remapped 'off' targeting %s complete, " "remapping to 'on' for %s " QB_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_on; strcpy(op->action, PCMK_ACTION_ON); /* Skip devices with automatic unfencing, because the cluster will handle it * when the node rejoins. */ for (iter = op->automatic_list; iter != NULL; iter = iter->next) { GList *match = g_list_find_custom(op->devices_list, iter->data, sort_strings); if (match) { op->devices_list = g_list_remove(op->devices_list, match->data); } } g_list_free_full(op->automatic_list, free); op->automatic_list = NULL; /* Rewind device list pointer */ op->devices = op->devices_list; } /*! * \internal * \brief Reset a remapped reboot operation * * \param[in,out] op Operation to reset */ static void undo_op_remap(remote_fencing_op_t *op) { if (op->phase > 0) { crm_info("Undoing remap of reboot targeting %s for %s " QB_XS " id=%.8s", op->target, op->client_name, op->id); op->phase = st_phase_requested; strcpy(op->action, PCMK_ACTION_REBOOT); } } /*! * \internal * \brief Create notification data XML for a fencing operation result * * \param[in,out] parent Parent XML element for newly created element * \param[in] op Fencer operation that completed * * \return Newly created XML to add as notification data * \note The caller is responsible for freeing the result. */ static xmlNode * fencing_result2xml(xmlNode *parent, const remote_fencing_op_t *op) { xmlNode *notify_data = pcmk__xe_create(parent, PCMK__XE_ST_NOTIFY_FENCE); pcmk__xe_set_int(notify_data, PCMK_XA_STATE, op->state); pcmk__xe_set(notify_data, PCMK__XA_ST_TARGET, op->target); pcmk__xe_set(notify_data, PCMK__XA_ST_DEVICE_ACTION, op->action); pcmk__xe_set(notify_data, PCMK__XA_ST_DELEGATE, op->delegate); pcmk__xe_set(notify_data, PCMK__XA_ST_REMOTE_OP, op->id); pcmk__xe_set(notify_data, PCMK__XA_ST_ORIGIN, op->originator); pcmk__xe_set(notify_data, PCMK__XA_ST_CLIENTID, op->client_id); pcmk__xe_set(notify_data, PCMK__XA_ST_CLIENTNAME, op->client_name); return notify_data; } /*! * \internal * \brief Broadcast a fence result notification to all CPG peers * * \param[in] op Fencer operation that completed * \param[in] op_merged Whether this operation is a duplicate of another */ void fenced_broadcast_op_result(const remote_fencing_op_t *op, bool op_merged) { static int count = 0; xmlNode *bcast = pcmk__xe_create(NULL, PCMK__XE_ST_REPLY); xmlNode *wrapper = NULL; xmlNode *notify_data = NULL; count++; crm_trace("Broadcasting result to peers"); pcmk__xe_set(bcast, PCMK__XA_T, PCMK__VALUE_ST_NOTIFY); pcmk__xe_set(bcast, PCMK__XA_SUBT, PCMK__VALUE_BROADCAST); pcmk__xe_set(bcast, PCMK__XA_ST_OP, STONITH_OP_NOTIFY); pcmk__xe_set_int(bcast, PCMK_XA_COUNT, count); if (op_merged) { - pcmk__xe_set_bool_attr(bcast, PCMK__XA_ST_OP_MERGED, true); + pcmk__xe_set_bool(bcast, PCMK__XA_ST_OP_MERGED, true); } wrapper = pcmk__xe_create(bcast, PCMK__XE_ST_CALLDATA); notify_data = fencing_result2xml(wrapper, op); stonith__xe_set_result(notify_data, &op->result); pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, bcast); pcmk__xml_free(bcast); return; } /*! 
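Both changed call sites visible in this patch (here in fenced_broadcast_op_result() and in the fenced_history.c hunk above) swap pcmk__xe_set_bool_attr() for pcmk__xe_set_bool() with the same element, attribute name, and boolean value, for example:

/* before */
pcmk__xe_set_bool_attr(bcast, PCMK__XA_ST_OP_MERGED, true);

/* after */
pcmk__xe_set_bool(bcast, PCMK__XA_ST_OP_MERGED, true);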
* \internal * \brief Reply to a local request originator and notify all subscribed clients * * \param[in,out] op Fencer operation that completed * \param[in,out] data Top-level XML to add notification to */ static void handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data) { xmlNode *notify_data = NULL; xmlNode *reply = NULL; pcmk__client_t *client = NULL; if (op->notify_sent == TRUE) { /* nothing to do */ return; } /* Do notification with a clean data object */ pcmk__xe_set_int(data, PCMK_XA_STATE, op->state); pcmk__xe_set(data, PCMK__XA_ST_TARGET, op->target); pcmk__xe_set(data, PCMK__XA_ST_OP, op->action); reply = fenced_construct_reply(op->request, data, &op->result); pcmk__xe_set(reply, PCMK__XA_ST_DELEGATE, op->delegate); /* Send fencing OP reply to local client that initiated fencing */ client = pcmk__find_client_by_id(op->client_id); if (client == NULL) { crm_trace("Skipping reply to %s: no longer a client", op->client_id); } else { do_local_reply(reply, client, op->call_options); } /* bcast to all local clients that the fencing operation happend */ notify_data = fencing_result2xml(NULL, op); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_FENCE, &op->result, notify_data); pcmk__xml_free(notify_data); fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); /* mark this op as having notify's already sent */ op->notify_sent = TRUE; pcmk__xml_free(reply); } /*! * \internal * \brief Finalize all duplicates of a given fencer operation * * \param[in,out] op Fencer operation that completed * \param[in,out] data Top-level XML to add notification to */ static void finalize_op_duplicates(remote_fencing_op_t *op, xmlNode *data) { for (GList *iter = op->duplicates; iter != NULL; iter = iter->next) { remote_fencing_op_t *other = iter->data; if (other->state == st_duplicate) { other->state = op->state; crm_debug("Performing duplicate notification for %s@%s: %s " QB_XS " id=%.8s", other->client_name, other->originator, pcmk_exec_status_str(op->result.execution_status), other->id); pcmk__copy_result(&op->result, &other->result); finalize_op(other, data, true); } else { // Possible if (for example) it timed out already crm_err("Skipping duplicate notification for %s@%s " QB_XS " state=%s id=%.8s", other->client_name, other->originator, stonith__op_state_text(other->state), other->id); } } } static char * delegate_from_xml(xmlNode *xml) { xmlNode *match = pcmk__xpath_find_one(xml->doc, "//*[@" PCMK__XA_ST_DELEGATE "]", LOG_NEVER); if (match == NULL) { return pcmk__xe_get_copy(xml, PCMK__XA_SRC); } else { return pcmk__xe_get_copy(match, PCMK__XA_ST_DELEGATE); } } /*! * \internal * \brief Finalize a peer fencing operation * * Clean up after a fencing operation completes. This function has two code * paths: the executioner uses it to broadcast the result to CPG peers, and then * each peer (including the executioner) uses it to process that broadcast and * notify its IPC clients of the result. * * \param[in,out] op Fencer operation that completed * \param[in,out] data If not NULL, XML reply of last delegated operation * \param[in] dup Whether this operation is a duplicate of another * (in which case, do not broadcast the result) * * \note The operation result should be set before calling this function. 
*/ static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup) { int level = LOG_ERR; const char *subt = NULL; xmlNode *local_data = NULL; gboolean op_merged = FALSE; CRM_CHECK((op != NULL), return); // This is a no-op if timers have already been cleared clear_remote_op_timers(op); if (op->notify_sent) { // Most likely, this is a timed-out action that eventually completed crm_notice("Operation '%s'%s%s by %s for %s@%s%s: " "Result arrived too late " QB_XS " id=%.8s", op->action, (op->target? " targeting " : ""), (op->target? op->target : ""), (op->delegate? op->delegate : "unknown node"), op->client_name, op->originator, (op_merged? " (merged)" : ""), op->id); return; } set_fencing_completed(op); undo_op_remap(op); if (data == NULL) { data = pcmk__xe_create(NULL, "remote-op"); local_data = data; } else if (op->delegate == NULL) { switch (op->result.execution_status) { case PCMK_EXEC_NO_FENCE_DEVICE: break; case PCMK_EXEC_INVALID: if (op->result.exit_status != CRM_EX_EXPIRED) { op->delegate = delegate_from_xml(data); } break; default: op->delegate = delegate_from_xml(data); break; } } if (dup || (pcmk__xe_get(data, PCMK__XA_ST_OP_MERGED) != NULL)) { op_merged = true; } /* Tell everyone the operation is done, we will continue * with doing the local notifications once we receive * the broadcast back. */ subt = pcmk__xe_get(data, PCMK__XA_SUBT); if (!dup && !pcmk__str_eq(subt, PCMK__VALUE_BROADCAST, pcmk__str_none)) { /* Defer notification until the bcast message arrives */ fenced_broadcast_op_result(op, op_merged); pcmk__xml_free(local_data); return; } if (pcmk__result_ok(&op->result) || dup || !pcmk__str_eq(op->originator, fenced_get_local_node(), pcmk__str_casei)) { level = LOG_NOTICE; } do_crm_log(level, "Operation '%s'%s%s by %s for %s@%s%s: %s (%s%s%s) " QB_XS " id=%.8s", op->action, (op->target? " targeting " : ""), (op->target? op->target : ""), (op->delegate? op->delegate : "unknown node"), op->client_name, op->originator, (op_merged? " (merged)" : ""), crm_exit_str(op->result.exit_status), pcmk_exec_status_str(op->result.execution_status), ((op->result.exit_reason == NULL)? "" : ": "), ((op->result.exit_reason == NULL)? "" : op->result.exit_reason), op->id); handle_local_reply_and_notify(op, data); if (!dup) { finalize_op_duplicates(op, data); } /* Free non-essential parts of the record * Keep the record around so we can query the history */ if (op->query_results) { g_list_free_full(op->query_results, free_remote_query); op->query_results = NULL; } if (op->request) { pcmk__xml_free(op->request); op->request = NULL; } pcmk__xml_free(local_data); } /*! 
* \internal * \brief Finalize a watchdog fencer op after the waiting time expires * * \param[in,out] userdata Fencer operation that completed * * \return G_SOURCE_REMOVE (which tells glib not to restart timer) */ static gboolean remote_op_watchdog_done(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_one = 0; crm_notice("Self-fencing (%s) by %s for %s assumed complete " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); op->state = st_done; pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); finalize_op(op, NULL, false); return G_SOURCE_REMOVE; } static gboolean remote_op_timeout_one(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_one = 0; crm_notice("Peer's '%s' action targeting %s for client %s timed out " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, "Peer did not return fence result within timeout"); // The requested delay has been applied for the first device if (op->client_delay > 0) { op->client_delay = 0; crm_trace("Try another device for '%s' action targeting %s " "for client %s without delay " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); } // Try another device, if appropriate request_peer_fencing(op, NULL); return G_SOURCE_REMOVE; } /*! * \internal * \brief Finalize a remote fencer operation that timed out * * \param[in,out] op Fencer operation that timed out * \param[in] reason Readable description of what step timed out */ static void finalize_timed_out_op(remote_fencing_op_t *op, const char *reason) { crm_debug("Action '%s' targeting %s for client %s timed out " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); if (op->phase == st_phase_on) { /* A remapped reboot operation timed out in the "on" phase, but the * "off" phase completed successfully, so quit trying any further * devices, and return success. */ op->state = st_done; pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { op->state = st_failed; pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, reason); } finalize_op(op, NULL, false); } /*! 
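The timer callbacks above (remote_op_watchdog_done(), remote_op_timeout_one(), remote_op_timeout()) zero their stored source ID and return G_SOURCE_REMOVE so glib tears the source down, while clear_remote_op_timers() cancels still-pending sources with g_source_remove(). A minimal stand-alone illustration of that pairing, using only glib and invented names:

#include <glib.h>
#include <stdio.h>

typedef struct {
    guint op_timer;
    GMainLoop *loop;
} demo_op_t;

/* Fires once: clear the stored ID and tell glib not to reschedule the source */
static gboolean
demo_timeout(gpointer user_data)
{
    demo_op_t *op = user_data;

    op->op_timer = 0;
    printf("operation timed out\n");
    g_main_loop_quit(op->loop);
    return G_SOURCE_REMOVE;
}

/* Cancel a timer that has not fired yet (the clear_remote_op_timers pattern) */
static void
clear_timer(demo_op_t *op)
{
    if (op->op_timer != 0) {
        g_source_remove(op->op_timer);
        op->op_timer = 0;
    }
}

int
main(void)
{
    demo_op_t op = { 0, NULL };

    op.loop = g_main_loop_new(NULL, FALSE);
    op.op_timer = g_timeout_add(100, demo_timeout, &op);

    g_main_loop_run(op.loop);
    clear_timer(&op);   // no-op here, because the callback already cleared the ID

    g_main_loop_unref(op.loop);
    return 0;
}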
* \internal * \brief Finalize a remote fencer operation that timed out * * \param[in,out] userdata Fencer operation that timed out * * \return G_SOURCE_REMOVE (which tells glib not to restart timer) */ static gboolean remote_op_timeout(gpointer userdata) { remote_fencing_op_t *op = userdata; op->op_timer_total = 0; if (op->state == st_done) { crm_debug("Action '%s' targeting %s for client %s already completed " QB_XS " id=%.8s", op->action, op->target, op->client_name, op->id); } else { finalize_timed_out_op(userdata, "Fencing did not complete within a " "total timeout based on the " "configured timeout and retries for " "any devices attempted"); } return G_SOURCE_REMOVE; } static gboolean remote_op_query_timeout(gpointer data) { remote_fencing_op_t *op = data; op->query_timer = 0; if (op->state == st_done) { crm_debug("Operation %.8s targeting %s already completed", op->id, op->target); } else if (op->state == st_exec) { crm_debug("Operation %.8s targeting %s already in progress", op->id, op->target); } else if (op->query_results) { // Query succeeded, so attempt the actual fencing crm_debug("Query %.8s targeting %s complete (state=%s)", op->id, op->target, stonith__op_state_text(op->state)); request_peer_fencing(op, NULL); } else { crm_debug("Query %.8s targeting %s timed out (state=%s)", op->id, op->target, stonith__op_state_text(op->state)); finalize_timed_out_op(op, "No capable peers replied to device query " "within timeout"); } return G_SOURCE_REMOVE; } static gboolean topology_is_empty(stonith_topology_t *tp) { int i; if (tp == NULL) { return TRUE; } for (i = 0; i < ST__LEVEL_COUNT; i++) { if (tp->levels[i] != NULL) { return FALSE; } } return TRUE; } /*! * \internal * \brief Add a device to an operation's automatic unfencing list * * \param[in,out] op Operation to modify * \param[in] device Device ID to add */ static void add_required_device(remote_fencing_op_t *op, const char *device) { GList *match = g_list_find_custom(op->automatic_list, device, sort_strings); if (!match) { op->automatic_list = g_list_prepend(op->automatic_list, pcmk__str_copy(device)); } } /*! * \internal * \brief Remove a device from the automatic unfencing list * * \param[in,out] op Operation to modify * \param[in] device Device ID to remove */ static void remove_required_device(remote_fencing_op_t *op, const char *device) { GList *match = g_list_find_custom(op->automatic_list, device, sort_strings); if (match) { op->automatic_list = g_list_remove(op->automatic_list, match->data); } } /* deep copy the device list */ static void set_op_device_list(remote_fencing_op_t * op, GList *devices) { GList *lpc = NULL; if (op->devices_list) { g_list_free_full(op->devices_list, free); op->devices_list = NULL; } for (lpc = devices; lpc != NULL; lpc = lpc->next) { const char *device = lpc->data; op->devices_list = g_list_append(op->devices_list, pcmk__str_copy(device)); } op->devices = op->devices_list; } /*! * \internal * \brief Check whether a node matches a topology target * * \param[in] tp Topology table entry to check * \param[in] node Name of node to check * * \return TRUE if node matches topology target */ static gboolean topology_matches(const stonith_topology_t *tp, const char *node) { regex_t r_patt; CRM_CHECK(node && tp && tp->target, return FALSE); switch (tp->kind) { case fenced_target_by_attribute: /* This level targets by attribute, so tp->target is a NAME=VALUE pair * of a permanent attribute applied to targeted nodes. 
The test below * relies on the locally cached copy of the CIB, so if fencing needs to * be done before the initial CIB is received or after a malformed CIB * is received, then the topology will be unable to be used. */ if (node_has_attr(node, tp->target_attribute, tp->target_value)) { crm_notice("Matched %s with %s by attribute", node, tp->target); return TRUE; } break; case fenced_target_by_pattern: /* This level targets node names matching a pattern, so tp->target * (and tp->target_pattern) is a regular expression. */ if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) { crm_info("Bad regex '%s' for fencing level", tp->target); } else { int status = regexec(&r_patt, node, 0, NULL, 0); regfree(&r_patt); if (status == 0) { crm_notice("Matched %s with %s by name", node, tp->target); return TRUE; } } break; case fenced_target_by_name: crm_trace("Testing %s against %s", node, tp->target); return pcmk__str_eq(tp->target, node, pcmk__str_casei); default: break; } crm_trace("No match for %s with %s", node, tp->target); return FALSE; } stonith_topology_t * find_topology_for_host(const char *host) { GHashTableIter tIter; stonith_topology_t *tp = g_hash_table_lookup(topology, host); if(tp != NULL) { crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology)); return tp; } g_hash_table_iter_init(&tIter, topology); while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) { if (topology_matches(tp, host)) { crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology)); return tp; } } crm_trace("No matches for %s in %d topology entries", host, g_hash_table_size(topology)); return NULL; } /*! * \internal * \brief Set fencing operation's device list to target's next topology level * * \param[in,out] op Remote fencing operation to modify * \param[in] empty_ok If true, an operation without a target (i.e. * queries) or a target without a topology will get a * pcmk_rc_ok return value instead of ENODEV * * \return Standard Pacemaker return value */ static int advance_topology_level(remote_fencing_op_t *op, bool empty_ok) { stonith_topology_t *tp = NULL; if (op->target) { tp = find_topology_for_host(op->target); } if (topology_is_empty(tp)) { return empty_ok? pcmk_rc_ok : ENODEV; } pcmk__assert(tp->levels != NULL); stonith__set_call_options(op->call_options, op->id, st_opt_topology); /* This is a new level, so undo any remapping left over from previous */ undo_op_remap(op); do { op->level++; } while (op->level < ST__LEVEL_COUNT && tp->levels[op->level] == NULL); if (op->level < ST__LEVEL_COUNT) { crm_trace("Attempting fencing level %d targeting %s (%d devices) " "for client %s@%s (id=%.8s)", op->level, op->target, g_list_length(tp->levels[op->level]), op->client_name, op->originator, op->id); set_op_device_list(op, tp->levels[op->level]); // The requested delay has been applied for the first fencing level if ((op->level > 1) && (op->client_delay > 0)) { op->client_delay = 0; } if ((g_list_next(op->devices_list) != NULL) && pcmk__str_eq(op->action, PCMK_ACTION_REBOOT, pcmk__str_none)) { /* A reboot has been requested for a topology level with multiple * devices. Instead of rebooting the devices sequentially, we will * turn them all off, then turn them all on again. (Think about * switched power outlets for redundant power supplies.) 
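/* For pattern-based topology levels, topology_matches() above compiles the
 * level's regular expression with regcomp() on every call and discards it
 * again with regfree(). A stand-alone sketch of that POSIX regex check is
 * below; the function name and arguments are illustrative only.
 */
#include <regex.h>
#include <stdbool.h>

/* return true if node matches the extended regular expression pattern;
 * an invalid pattern is treated as "no match" */
static bool
demo_node_matches_pattern(const char *node, const char *pattern)
{
    regex_t re;
    int rc;

    if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB) != 0) {
        return false;
    }
    rc = regexec(&re, node, 0, NULL, 0);
    regfree(&re);
    return (rc == 0);
}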
*/ op_phase_off(op); } return pcmk_rc_ok; } crm_info("All %sfencing options targeting %s for client %s@%s failed " QB_XS " id=%.8s", (stonith_watchdog_timeout_ms > 0)?"non-watchdog ":"", op->target, op->client_name, op->originator, op->id); return ENODEV; } /*! * \internal * \brief If fencing operation is a duplicate, merge it into the other one * * \param[in,out] op Fencing operation to check */ static void merge_duplicates(remote_fencing_op_t *op) { GHashTableIter iter; remote_fencing_op_t *other = NULL; time_t now = time(NULL); g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) { const char *other_action = op_requested_action(other); pcmk__node_status_t *node = NULL; if (!strcmp(op->id, other->id)) { continue; // Don't compare against self } if (other->state > st_exec) { crm_trace("%.8s not duplicate of %.8s: not in progress", op->id, other->id); continue; } if (!pcmk__str_eq(op->target, other->target, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: node %s vs. %s", op->id, other->id, op->target, other->target); continue; } if (!pcmk__str_eq(op->action, other_action, pcmk__str_none)) { crm_trace("%.8s not duplicate of %.8s: action %s vs. %s", op->id, other->id, op->action, other_action); continue; } if (pcmk__str_eq(op->client_name, other->client_name, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: same client %s", op->id, other->id, op->client_name); continue; } if (pcmk__str_eq(other->target, other->originator, pcmk__str_casei)) { crm_trace("%.8s not duplicate of %.8s: self-fencing for %s", op->id, other->id, other->target); continue; } node = pcmk__get_node(0, other->originator, NULL, pcmk__node_search_cluster_member); if (!fencing_peer_active(node)) { crm_notice("Failing action '%s' targeting %s originating from " "client %s@%s: Originator is dead " QB_XS " id=%.8s", other->action, other->target, other->client_name, other->originator, other->id); crm_trace("%.8s not duplicate of %.8s: originator dead", op->id, other->id); other->state = st_failed; continue; } if ((other->total_timeout > 0) && (now > (other->total_timeout + other->created))) { crm_trace("%.8s not duplicate of %.8s: old (%lld vs. %lld + %ds)", op->id, other->id, (long long)now, (long long)other->created, other->total_timeout); continue; } /* There is another in-flight request to fence the same host * Piggyback on that instead. If it fails, so do we. */ other->duplicates = g_list_append(other->duplicates, op); if (other->total_timeout == 0) { other->total_timeout = op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL); crm_trace("Best guess as to timeout used for %.8s: %ds", other->id, other->total_timeout); } crm_notice("Merging fencing action '%s' targeting %s originating from " "client %s with identical request from %s@%s " QB_XS " original=%.8s duplicate=%.8s total_timeout=%ds", op->action, op->target, op->client_name, other->client_name, other->originator, op->id, other->id, other->total_timeout); report_timeout_period(op, other->total_timeout); op->state = st_duplicate; } } static uint32_t fencing_active_peers(void) { uint32_t count = 0; pcmk__node_status_t *entry = NULL; GHashTableIter gIter; g_hash_table_iter_init(&gIter, pcmk__peer_cache); while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) { if(fencing_peer_active(entry)) { count++; } } return count; } /*! 
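/* fencing_active_peers() above walks the peer cache with a GHashTableIter and
 * counts the entries that pass a predicate. A generic version of that counting
 * idiom is sketched below; the demo_peer_status_t struct and its "active"
 * field are assumptions standing in for the real peer type and the
 * fencing_peer_active() test.
 */
#include <glib.h>
#include <stdbool.h>

typedef struct {
    char *name;
    bool active;
} demo_peer_status_t;

static unsigned int
demo_count_active(GHashTable *peers)
{
    unsigned int count = 0;
    GHashTableIter iter;
    gpointer value = NULL;

    g_hash_table_iter_init(&iter, peers);
    while (g_hash_table_iter_next(&iter, NULL, &value)) {
        if (((demo_peer_status_t *) value)->active) {
            count++;
        }
    }
    return count;
}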
* \internal * \brief Process a manual confirmation of a pending fence action * * \param[in] client IPC client that sent confirmation * \param[in,out] msg Request XML with manual confirmation * * \return Standard Pacemaker return code */ int fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg) { remote_fencing_op_t *op = NULL; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_ERR); CRM_CHECK(dev != NULL, return EPROTO); crm_notice("Received manual confirmation that %s has been fenced", pcmk__s(pcmk__xe_get(dev, PCMK__XA_ST_TARGET), "unknown target")); op = initiate_remote_stonith_op(client, msg, TRUE); if (op == NULL) { return EPROTO; } op->state = st_done; op->delegate = pcmk__str_copy("a human"); // For the fencer's purposes, the fencing operation is done pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); finalize_op(op, msg, false); /* For the requester's purposes, the operation is still pending. The * actual result will be sent asynchronously via the operation's done_cb(). */ return EINPROGRESS; } /*! * \internal * \brief Create a new remote stonith operation * * \param[in] client ID of local stonith client that initiated the operation * \param[in] request The request from the client that started the operation * \param[in] peer TRUE if this operation is owned by another stonith peer * (an operation owned by one peer is stored on all peers, * but only the owner executes it; all nodes get the results * once the owner finishes execution) */ void * create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer) { remote_fencing_op_t *op = NULL; xmlNode *dev = pcmk__xpath_find_one(request->doc, "//*[@" PCMK__XA_ST_TARGET "]", LOG_NEVER); int rc = pcmk_rc_ok; const char *operation = NULL; init_stonith_remote_op_hash_table(&stonith_remote_op_list); /* If this operation is owned by another node, check to make * sure we haven't already created this operation. */ if (peer && dev) { const char *op_id = pcmk__xe_get(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(op_id != NULL, return NULL); op = g_hash_table_lookup(stonith_remote_op_list, op_id); if (op) { crm_debug("Reusing existing remote fencing op %.8s for %s", op_id, ((client == NULL)? "unknown client" : client)); return op; } } op = pcmk__assert_alloc(1, sizeof(remote_fencing_op_t)); pcmk__xe_get_int(request, PCMK__XA_ST_TIMEOUT, &(op->base_timeout)); // Value -1 means disable any static/random fencing delays pcmk__xe_get_int(request, PCMK__XA_ST_DELAY, &(op->client_delay)); if (peer && dev) { op->id = pcmk__xe_get_copy(dev, PCMK__XA_ST_REMOTE_OP); } else { op->id = pcmk__generate_uuid(); } g_hash_table_replace(stonith_remote_op_list, op->id, op); op->state = st_query; op->replies_expected = fencing_active_peers(); op->action = pcmk__xe_get_copy(dev, PCMK__XA_ST_DEVICE_ACTION); /* The node initiating the stonith operation. If an operation is relayed, * this is the last node the operation lands on. When in standalone mode, * origin is the ID of the client that originated the operation. * * Or may be the name of the function that created the operation. */ op->originator = pcmk__xe_get_copy(dev, PCMK__XA_ST_ORIGIN); if (op->originator == NULL) { /* Local or relayed request */ op->originator = pcmk__str_copy(fenced_get_local_node()); } // Delegate may not be set op->delegate = pcmk__xe_get_copy(dev, PCMK__XA_ST_DELEGATE); op->created = time(NULL); CRM_LOG_ASSERT(client != NULL); op->client_id = pcmk__str_copy(client); /* For a RELAY operation, set fenced on the client. 
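/* create_remote_stonith_op() above first checks whether a peer-owned
 * operation already exists before allocating a new record keyed by its own ID
 * string. A reduced sketch of that lookup-or-create pattern follows; demo_*
 * names are illustrative and the struct is a stand-in for the real operation
 * type.
 */
#include <glib.h>

typedef struct {
    char *id;           /* also used as the hash table key */
    /* ... other per-operation state would live here ... */
} demo_op_t;

static demo_op_t *
demo_find_or_create_op(GHashTable *ops, const char *id)
{
    demo_op_t *op = g_hash_table_lookup(ops, id);

    if (op != NULL) {
        return op;                          /* reuse the existing record */
    }
    op = g_new0(demo_op_t, 1);
    op->id = g_strdup(id);
    g_hash_table_replace(ops, op->id, op);  /* key is owned by the record */
    return op;
}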
*/ operation = pcmk__xe_get(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { op->client_name = pcmk__assert_asprintf("%s.%lu", crm_system_name, (unsigned long) getpid()); } else { op->client_name = pcmk__xe_get_copy(request, PCMK__XA_ST_CLIENTNAME); } op->target = pcmk__xe_get_copy(dev, PCMK__XA_ST_TARGET); // @TODO Figure out how to avoid copying XML here op->request = pcmk__xml_copy(NULL, request); rc = pcmk__xe_get_flags(request, PCMK__XA_ST_CALLOPT, &(op->call_options), 0U); if (rc != pcmk_rc_ok) { crm_warn("Couldn't parse options from request %s: %s", op->id, pcmk_rc_str(rc)); } pcmk__xe_get_int(request, PCMK__XA_ST_CALLID, &(op->client_callid)); crm_trace("%s new fencing op %s ('%s' targeting %s for client %s, " "base timeout %ds, %u %s expected)", (peer && dev)? "Recorded" : "Generated", op->id, op->action, op->target, op->client_name, op->base_timeout, op->replies_expected, pcmk__plural_alt(op->replies_expected, "reply", "replies")); if (op->call_options & st_opt_cs_nodeid) { int nodeid; pcmk__node_status_t *node = NULL; pcmk__scan_min_int(op->target, &nodeid, 0); node = pcmk__search_node_caches(nodeid, NULL, NULL, pcmk__node_search_any |pcmk__node_search_cluster_cib); /* Ensure the conversion only happens once */ stonith__clear_call_options(op->call_options, op->id, st_opt_cs_nodeid); if ((node != NULL) && (node->name != NULL)) { pcmk__str_update(&(op->target), node->name); } else { crm_warn("Could not expand nodeid '%s' into a host name", op->target); } } /* check to see if this is a duplicate operation of another in-flight operation */ merge_duplicates(op); if (op->state != st_duplicate) { /* kick history readers */ fenced_send_notification(PCMK__VALUE_ST_NOTIFY_HISTORY, NULL, NULL); } /* safe to trim as long as that doesn't touch pending ops */ stonith_fence_history_trim(); return op; } /*! 
* \internal * \brief Create a peer fencing operation from a request, and initiate it * * \param[in] client IPC client that made request (NULL to get from request) * \param[in] request Request XML * \param[in] manual_ack Whether this is a manual action confirmation * * \return Newly created operation on success, otherwise NULL */ remote_fencing_op_t * initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request, gboolean manual_ack) { int query_timeout = 0; xmlNode *query = NULL; const char *client_id = NULL; remote_fencing_op_t *op = NULL; const char *relay_op_id = NULL; const char *operation = NULL; if (client) { client_id = client->id; } else { client_id = pcmk__xe_get(request, PCMK__XA_ST_CLIENTID); } CRM_LOG_ASSERT(client_id != NULL); op = create_remote_stonith_op(client_id, request, FALSE); op->owner = TRUE; if (manual_ack) { return op; } CRM_CHECK(op->action, return NULL); if (advance_topology_level(op, true) != pcmk_rc_ok) { op->state = st_failed; } switch (op->state) { case st_failed: // advance_topology_level() exhausted levels pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_ERROR, "All topology levels failed"); crm_warn("Could not request peer fencing (%s) targeting %s " QB_XS " id=%.8s", op->action, op->target, op->id); finalize_op(op, NULL, false); return op; case st_duplicate: crm_info("Requesting peer fencing (%s) targeting %s (duplicate) " QB_XS " id=%.8s", op->action, op->target, op->id); return op; default: crm_notice("Requesting peer fencing (%s) targeting %s " QB_XS " id=%.8s state=%s base_timeout=%ds", op->action, op->target, op->id, stonith__op_state_text(op->state), op->base_timeout); } query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY, NULL, op->call_options); pcmk__xe_set(query, PCMK__XA_ST_REMOTE_OP, op->id); pcmk__xe_set(query, PCMK__XA_ST_TARGET, op->target); pcmk__xe_set(query, PCMK__XA_ST_DEVICE_ACTION, op_requested_action(op)); pcmk__xe_set(query, PCMK__XA_ST_ORIGIN, op->originator); pcmk__xe_set(query, PCMK__XA_ST_CLIENTID, op->client_id); pcmk__xe_set(query, PCMK__XA_ST_CLIENTNAME, op->client_name); pcmk__xe_set_int(query, PCMK__XA_ST_TIMEOUT, op->base_timeout); /* In case of RELAY operation, RELAY information is added to the query to delete the original operation of RELAY. */ operation = pcmk__xe_get(request, PCMK__XA_ST_OP); if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) { relay_op_id = pcmk__xe_get(request, PCMK__XA_ST_REMOTE_OP); if (relay_op_id) { pcmk__xe_set(query, PCMK__XA_ST_REMOTE_OP_RELAY, relay_op_id); } } pcmk__cluster_send_message(NULL, pcmk_ipc_fenced, query); pcmk__xml_free(query); query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR; op->query_timer = pcmk__create_timer((1000 * query_timeout), remote_op_query_timeout, op); return op; } enum find_best_peer_options { /*! Skip checking the target peer for capable fencing devices */ FIND_PEER_SKIP_TARGET = 0x0001, /*! Only check the target peer for capable fencing devices */ FIND_PEER_TARGET_ONLY = 0x0002, /*! Skip peers and devices that are not verified */ FIND_PEER_VERIFIED_ONLY = 0x0004, }; static bool is_watchdog_fencing(const remote_fencing_op_t *op, const char *device) { return (stonith_watchdog_timeout_ms > 0 // Only an explicit mismatch is considered not a watchdog fencing. 
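/* initiate_remote_stonith_op() above assembles its query message by creating
 * an XML element and setting one attribute per field, via the pcmk__xe_*
 * wrappers. Expressed with plain libxml2 calls, the same shape looks roughly
 * like the sketch below; the element and attribute names here are made up for
 * illustration and are not the PCMK__XA_* constants.
 */
#include <libxml/tree.h>

static xmlNode *
demo_build_query(const char *op_id, const char *target, const char *action)
{
    xmlNode *query = xmlNewNode(NULL, (const xmlChar *) "demo_query");

    xmlSetProp(query, (const xmlChar *) "remote_op", (const xmlChar *) op_id);
    xmlSetProp(query, (const xmlChar *) "target", (const xmlChar *) target);
    xmlSetProp(query, (const xmlChar *) "action", (const xmlChar *) action);
    return query;   /* caller frees with xmlFreeNode() */
}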
&& pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_null_matches) && pcmk__is_fencing_action(op->action) && node_does_watchdog_fencing(op->target)); } static peer_device_info_t * find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options) { GList *iter = NULL; gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE; if ((device == NULL) && pcmk__is_set(op->call_options, st_opt_topology)) { return NULL; } for (iter = op->query_results; iter != NULL; iter = iter->next) { peer_device_info_t *peer = iter->data; crm_trace("Testing result from %s targeting %s with %d device%s: %d %x", peer->host, op->target, peer->ndevices, pcmk__plural_s(peer->ndevices), peer->tried, options); if ((options & FIND_PEER_SKIP_TARGET) && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } if ((options & FIND_PEER_TARGET_ONLY) && !pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } if (pcmk__is_set(op->call_options, st_opt_topology)) { if (grab_peer_device(op, peer, device, verified_devices_only)) { return peer; } } else if (!peer->tried && count_peer_devices(op, peer, verified_devices_only, fenced_support_flag(op->action))) { /* No topology: Use the current best peer */ crm_trace("Simple fencing"); return peer; } } return NULL; } static peer_device_info_t * stonith_choose_peer(remote_fencing_op_t * op) { const char *device = NULL; peer_device_info_t *peer = NULL; uint32_t active = fencing_active_peers(); do { if (op->devices) { device = op->devices->data; crm_trace("Checking for someone to fence (%s) %s using %s", op->action, op->target, device); } else { crm_trace("Checking for someone to fence (%s) %s", op->action, op->target); } /* Best choice is a peer other than the target with verified access */ peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY); if (peer) { crm_trace("Found verified peer %s for %s", peer->host, device?device:""); return peer; } if(op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) { crm_trace("Waiting before looking for unverified devices to fence %s", op->target); return NULL; } /* If no other peer has verified access, next best is unverified access */ peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET); if (peer) { crm_trace("Found best unverified peer %s", peer->host); return peer; } /* If no other peer can do it, last option is self-fencing * (which is never allowed for the "on" phase of a remapped reboot) */ if (op->phase != st_phase_on) { peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY); if (peer) { crm_trace("%s will fence itself", peer->host); return peer; } } /* Try the next fencing level if there is one (unless we're in the "on" * phase of a remapped "reboot", because we ignore errors in that case) */ } while ((op->phase != st_phase_on) && pcmk__is_set(op->call_options, st_opt_topology) && (advance_topology_level(op, false) == pcmk_rc_ok)); /* With a simple watchdog fencing configuration without a topology, * "device" is NULL here. Consider it should be done with watchdog fencing. */ if (is_watchdog_fencing(op, device)) { crm_info("Couldn't contact watchdog-fencing target-node (%s)", op->target); /* check_watchdog_fencing_and_wait will log additional info */ } else { crm_notice("Couldn't find anyone to fence (%s) %s using %s", op->action, op->target, (device? 
device : "any device")); } return NULL; } static int valid_fencing_timeout(int specified_timeout, bool action_specific, const remote_fencing_op_t *op, const char *device) { int timeout = specified_timeout; if (!is_watchdog_fencing(op, device)) { return timeout; } timeout = (int) QB_MIN(QB_MAX(specified_timeout, pcmk__timeout_ms2s(stonith_watchdog_timeout_ms)), INT_MAX); if (timeout > specified_timeout) { if (action_specific) { crm_warn("pcmk_%s_timeout %ds for %s is too short (must be >= " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " "instead", op->action, specified_timeout, device? device : "watchdog", timeout, timeout); } else { crm_warn("Fencing timeout %ds is too short (must be >= " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT " %ds), using %ds " "instead", specified_timeout, timeout, timeout); } } return timeout; } static int get_device_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer, const char *device, bool with_delay) { int timeout = valid_fencing_timeout(op->base_timeout, false, op, device); device_properties_t *props; if (!peer || !device) { return timeout; } props = g_hash_table_lookup(peer->devices, device); if (!props) { return timeout; } if (props->custom_action_timeout[op->phase]) { timeout = valid_fencing_timeout(props->custom_action_timeout[op->phase], true, op, device); } // op->client_delay < 0 means disable any static/random fencing delays if (with_delay && (op->client_delay >= 0)) { // delay_base is eventually limited by delay_max timeout += (props->delay_max[op->phase] > 0 ? props->delay_max[op->phase] : props->delay_base[op->phase]); } return timeout; } struct timeout_data { const remote_fencing_op_t *op; const peer_device_info_t *peer; int total_timeout; }; /*! * \internal * \brief Add timeout to a total if device has not been executed yet * * \param[in] key GHashTable key (device ID) * \param[in] value GHashTable value (device properties) * \param[in,out] user_data Timeout data */ static void add_device_timeout(gpointer key, gpointer value, gpointer user_data) { const char *device_id = key; device_properties_t *props = value; struct timeout_data *timeout = user_data; if (!props->executed[timeout->op->phase] && !props->disallowed[timeout->op->phase]) { timeout->total_timeout += get_device_timeout(timeout->op, timeout->peer, device_id, true); } } static int get_peer_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer) { struct timeout_data timeout; timeout.op = op; timeout.peer = peer; timeout.total_timeout = 0; g_hash_table_foreach(peer->devices, add_device_timeout, &timeout); return (timeout.total_timeout? timeout.total_timeout : op->base_timeout); } static int get_op_total_timeout(const remote_fencing_op_t *op, const peer_device_info_t *chosen_peer) { long long total_timeout = 0; stonith_topology_t *tp = find_topology_for_host(op->target); if (pcmk__is_set(op->call_options, st_opt_topology) && (tp != NULL)) { int i; GList *device_list = NULL; GList *iter = NULL; GList *auto_list = NULL; if (pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none) && (op->automatic_list != NULL)) { auto_list = g_list_copy(op->automatic_list); } /* Yep, this looks scary, nested loops all over the place. * Here is what is going on. * Loop1: Iterate through fencing levels. * Loop2: If a fencing level has devices, loop through each device * Loop3: For each device in a fencing level, see what peer owns it * and what that peer has reported the timeout is for the device. 
*/ for (i = 0; i < ST__LEVEL_COUNT; i++) { if (!tp->levels[i]) { continue; } for (device_list = tp->levels[i]; device_list; device_list = device_list->next) { bool found = false; for (iter = op->query_results; iter != NULL; iter = iter->next) { const peer_device_info_t *peer = iter->data; if (auto_list) { GList *match = g_list_find_custom(auto_list, device_list->data, sort_strings); if (match) { auto_list = g_list_remove(auto_list, match->data); } } if (find_peer_device(op, peer, device_list->data, fenced_support_flag(op->action))) { total_timeout += get_device_timeout(op, peer, device_list->data, true); found = true; break; } } /* End Loop3: match device with peer that owns device, find device's timeout period */ /* in case of watchdog-device we add the timeout to the budget if didn't get a reply */ if (!found && is_watchdog_fencing(op, device_list->data)) { total_timeout += pcmk__timeout_ms2s(stonith_watchdog_timeout_ms); } } /* End Loop2: iterate through devices at a specific level */ } /*End Loop1: iterate through fencing levels */ //Add only exists automatic_list device timeout if (auto_list) { for (iter = auto_list; iter != NULL; iter = iter->next) { GList *iter2 = NULL; for (iter2 = op->query_results; iter2 != NULL; iter2 = iter2->next) { peer_device_info_t *peer = iter2->data; if (find_peer_device(op, peer, iter->data, fenced_df_supports_on)) { total_timeout += get_device_timeout(op, peer, iter->data, true); break; } } } } g_list_free(auto_list); } else if (chosen_peer) { total_timeout = get_peer_timeout(op, chosen_peer); } else { total_timeout = valid_fencing_timeout(op->base_timeout, false, op, NULL); } if (total_timeout <= 0) { total_timeout = op->base_timeout; } /* Take any requested fencing delay into account to prevent it from eating * up the total timeout. */ if (op->client_delay > 0) { total_timeout += op->client_delay; } return (int) QB_MIN(total_timeout, INT_MAX); } static void report_timeout_period(remote_fencing_op_t * op, int op_timeout) { GList *iter = NULL; xmlNode *update = NULL; const char *client_node = NULL; const char *client_id = NULL; const char *call_id = NULL; if (op->call_options & st_opt_sync_call) { /* There is no reason to report the timeout for a synchronous call. It * is impossible to use the reported timeout to do anything when the client * is blocking for the response. This update is only important for * async calls that require a callback to report the results in. 
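/* get_peer_timeout() above totals per-device timeouts by handing a small
 * accumulator struct to g_hash_table_foreach() as user_data. A generic sketch
 * of that accumulator idiom follows; here the table is assumed to map device
 * names to heap-allocated ints (0 meaning "unset"), which is a simplification
 * of the real device_properties_t values.
 */
#include <glib.h>

typedef struct {
    int default_timeout_s;
    int total_s;
} demo_timeout_sum_t;

static void
demo_add_one_timeout(gpointer key, gpointer value, gpointer user_data)
{
    demo_timeout_sum_t *sum = user_data;
    const int *configured = value;      /* per-device timeout, 0 = unset */

    sum->total_s += (*configured > 0)? *configured : sum->default_timeout_s;
}

/* usage:
 *   demo_timeout_sum_t sum = { .default_timeout_s = 60, .total_s = 0 };
 *   g_hash_table_foreach(per_device_timeouts, demo_add_one_timeout, &sum);
 */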
*/ return; } else if (!op->request) { return; } crm_trace("Reporting timeout for %s (id=%.8s)", op->client_name, op->id); client_node = pcmk__xe_get(op->request, PCMK__XA_ST_CLIENTNODE); call_id = pcmk__xe_get(op->request, PCMK__XA_ST_CALLID); client_id = pcmk__xe_get(op->request, PCMK__XA_ST_CLIENTID); if (!client_node || !call_id || !client_id) { return; } if (pcmk__str_eq(client_node, fenced_get_local_node(), pcmk__str_casei)) { // Client is connected to this node, so send update directly to them do_stonith_async_timeout_update(client_id, call_id, op_timeout); return; } /* The client is connected to another node, relay this update to them */ update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0); pcmk__xe_set(update, PCMK__XA_ST_REMOTE_OP, op->id); pcmk__xe_set(update, PCMK__XA_ST_CLIENTID, client_id); pcmk__xe_set(update, PCMK__XA_ST_CALLID, call_id); pcmk__xe_set_int(update, PCMK__XA_ST_TIMEOUT, op_timeout); pcmk__cluster_send_message(pcmk__get_node(0, client_node, NULL, pcmk__node_search_cluster_member), pcmk_ipc_fenced, update); pcmk__xml_free(update); for (iter = op->duplicates; iter != NULL; iter = iter->next) { remote_fencing_op_t *dup = iter->data; crm_trace("Reporting timeout for duplicate %.8s to client %s", dup->id, dup->client_name); report_timeout_period(iter->data, op_timeout); } } /*! * \internal * \brief Advance an operation to the next device in its topology * * \param[in,out] op Fencer operation to advance * \param[in] device ID of device that just completed * \param[in,out] msg If not NULL, XML reply of last delegated operation */ static void advance_topology_device_in_level(remote_fencing_op_t *op, const char *device, xmlNode *msg) { /* Advance to the next device at this topology level, if any */ if (op->devices) { op->devices = op->devices->next; } /* Handle automatic unfencing if an "on" action was requested */ if ((op->phase == st_phase_requested) && pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)) { /* If the device we just executed was required, it's not anymore */ remove_required_device(op, device); /* If there are no more devices at this topology level, run through any * remaining devices with automatic unfencing */ if (op->devices == NULL) { op->devices = op->automatic_list; } } if ((op->devices == NULL) && (op->phase == st_phase_off)) { /* We're done with this level and with required devices, but we had * remapped "reboot" to "off", so start over with "on". If any devices * need to be turned back on, op->devices will be non-NULL after this. 
*/ op_phase_on(op); } // This function is only called if the previous device succeeded pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (op->devices) { /* Necessary devices remain, so execute the next one */ crm_trace("Next targeting %s on behalf of %s@%s", op->target, op->client_name, op->originator); // The requested delay has been applied for the first device if (op->client_delay > 0) { op->client_delay = 0; } request_peer_fencing(op, NULL); } else { /* We're done with all devices and phases, so finalize operation */ crm_trace("Marking complex fencing op targeting %s as complete", op->target); op->state = st_done; finalize_op(op, msg, false); } } static gboolean check_watchdog_fencing_and_wait(remote_fencing_op_t * op) { if (node_does_watchdog_fencing(op->target)) { guint timeout_ms = QB_MIN(stonith_watchdog_timeout_ms, UINT_MAX); crm_notice("Waiting %s for %s to self-fence (%s) for " "client %s " QB_XS " id=%.8s", pcmk__readable_interval(timeout_ms), op->target, op->action, op->client_name, op->id); if (op->op_timer_one) { g_source_remove(op->op_timer_one); } op->op_timer_one = pcmk__create_timer(timeout_ms, remote_op_watchdog_done, op); return TRUE; } else { crm_debug("Skipping fallback to watchdog-fencing as %s is " "not in host-list", op->target); } return FALSE; } /*! * \internal * \brief Ask a peer to execute a fencing operation * * \param[in,out] op Fencing operation to be executed * \param[in,out] peer If NULL or topology is in use, choose best peer to * execute the fencing, otherwise use this peer */ static void request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer) { const char *device = NULL; int timeout; CRM_CHECK(op != NULL, return); crm_trace("Action %.8s targeting %s for %s is %s", op->id, op->target, op->client_name, stonith__op_state_text(op->state)); if ((op->phase == st_phase_on) && (op->devices != NULL)) { /* We are in the "on" phase of a remapped topology reboot. If this * device has pcmk_reboot_action="off", or doesn't support the "on" * action, skip it. * * We can't check device properties at this point because we haven't * chosen a peer for this stage yet. Instead, we check the local node's * knowledge about the device. If different versions of the fence agent * are installed on different nodes, there's a chance this could be * mistaken, but the worst that could happen is we don't try turning the * node back on when we should. 
*/ device = op->devices->data; if (pcmk__str_eq(fenced_device_reboot_action(device), PCMK_ACTION_OFF, pcmk__str_none)) { crm_info("Not turning %s back on using %s because the device is " "configured to stay off (pcmk_reboot_action='off')", op->target, device); advance_topology_device_in_level(op, device, NULL); return; } if (!fenced_device_supports_on(device)) { crm_info("Not turning %s back on using %s because the agent " "doesn't support 'on'", op->target, device); advance_topology_device_in_level(op, device, NULL); return; } } timeout = op->base_timeout; if ((peer == NULL) && !pcmk__is_set(op->call_options, st_opt_topology)) { peer = stonith_choose_peer(op); } if (!op->op_timer_total) { op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, peer); op->op_timer_total = pcmk__create_timer(1000 * op->total_timeout, remote_op_timeout, op); report_timeout_period(op, op->total_timeout); crm_info("Total timeout set to %ds for peer's fencing targeting %s for %s " QB_XS " id=%.8s", op->total_timeout, op->target, op->client_name, op->id); } if (pcmk__is_set(op->call_options, st_opt_topology) && (op->devices != NULL)) { /* Ignore the caller's peer preference if topology is in use, because * that peer might not have access to the required device. With * topology, stonith_choose_peer() removes the device from further * consideration, so the timeout must be calculated beforehand. * * @TODO Basing the total timeout on the caller's preferred peer (above) * is less than ideal. */ peer = stonith_choose_peer(op); device = op->devices->data; /* Fencing timeout sent to peer takes no delay into account. * The peer will add a dedicated timer for any delay upon * schedule_stonith_command(). */ timeout = get_device_timeout(op, peer, device, false); } if (peer) { int timeout_one = 0; xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0); const pcmk__node_status_t *peer_node = pcmk__get_node(0, peer->host, NULL, pcmk__node_search_cluster_member); if (op->client_delay > 0) { /* Take requested fencing delay into account to prevent it from * eating up the timeout. 
*/ timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay; } pcmk__xe_set(remote_op, PCMK__XA_ST_REMOTE_OP, op->id); pcmk__xe_set(remote_op, PCMK__XA_ST_TARGET, op->target); pcmk__xe_set(remote_op, PCMK__XA_ST_DEVICE_ACTION, op->action); pcmk__xe_set(remote_op, PCMK__XA_ST_ORIGIN, op->originator); pcmk__xe_set(remote_op, PCMK__XA_ST_CLIENTID, op->client_id); pcmk__xe_set(remote_op, PCMK__XA_ST_CLIENTNAME, op->client_name); pcmk__xe_set_int(remote_op, PCMK__XA_ST_TIMEOUT, timeout); pcmk__xe_set_int(remote_op, PCMK__XA_ST_CALLOPT, op->call_options); pcmk__xe_set_int(remote_op, PCMK__XA_ST_DELAY, op->client_delay); if (device) { timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_device_timeout(op, peer, device, true); crm_notice("Requesting that %s perform '%s' action targeting %s " "using %s " QB_XS " for client %s (%ds)", peer->host, op->action, op->target, device, op->client_name, timeout_one); pcmk__xe_set(remote_op, PCMK__XA_ST_DEVICE_ID, device); } else { timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer); crm_notice("Requesting that %s perform '%s' action targeting %s " QB_XS " for client %s (%ds, %s)", peer->host, op->action, op->target, op->client_name, timeout_one, pcmk__readable_interval(stonith_watchdog_timeout_ms)); } op->state = st_exec; if (op->op_timer_one) { g_source_remove(op->op_timer_one); op->op_timer_one = 0; } if (!is_watchdog_fencing(op, device) || !check_watchdog_fencing_and_wait(op)) { /* Some thoughts about self-fencing cases reaching this point: - Actually check in check_watchdog_fencing_and_wait shouldn't fail if STONITH_WATCHDOG_ID is chosen as fencing-device and it being present implies watchdog-fencing is enabled anyway - If watchdog-fencing is disabled either in general or for a specific target - detected in check_watchdog_fencing_and_wait - for some other kind of self-fencing we can't expect a success answer but timeout is fine if the node doesn't come back in between - Delicate might be the case where we have watchdog-fencing enabled for a node but the watchdog-fencing-device isn't explicitly chosen for self-fencing. Local scheduler execution in sbd might detect the node as unclean and lead to timely self-fencing. Otherwise the selection of PCMK_OPT_STONITH_WATCHDOG_TIMEOUT at least is questionable. */ /* coming here we're not waiting for watchdog timeout - thus engage timer with timout evaluated before */ op->op_timer_one = pcmk__create_timer((1000 * timeout_one), remote_op_timeout_one, op); } pcmk__cluster_send_message(peer_node, pcmk_ipc_fenced, remote_op); peer->tried = TRUE; pcmk__xml_free(remote_op); return; } else if (op->phase == st_phase_on) { /* A remapped "on" cannot be executed, but the node was already * turned off successfully, so ignore the error and continue. 
*/ crm_warn("Ignoring %s 'on' failure (no capable peers) targeting %s " "after successful 'off'", device, op->target); advance_topology_device_in_level(op, device, NULL); return; } else if (op->owner == FALSE) { crm_err("Fencing (%s) targeting %s for client %s is not ours to control", op->action, op->target, op->client_name); } else if (op->query_timer == 0) { /* We've exhausted all available peers */ crm_info("No remaining peers capable of fencing (%s) %s for client %s " QB_XS " state=%s", op->action, op->target, op->client_name, stonith__op_state_text(op->state)); CRM_CHECK(op->state < st_done, return); finalize_timed_out_op(op, "All nodes failed, or are unable, to " "fence target"); } else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) { /* if the operation never left the query state, * but we have all the expected replies, then no devices * are available to execute the fencing operation. */ if (is_watchdog_fencing(op, device) && check_watchdog_fencing_and_wait(op)) { /* Consider a watchdog fencing targeting an offline node executing * once it starts waiting for the target to self-fence. So that when * the query timer pops, remote_op_query_timeout() considers the * fencing already in progress. */ op->state = st_exec; return; } if (op->state == st_query) { crm_info("No peers (out of %d) have devices capable of fencing " "(%s) %s for client %s " QB_XS " state=%s", op->replies, op->action, op->target, op->client_name, stonith__op_state_text(op->state)); pcmk__reset_result(&op->result); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); } else { if (pcmk__is_set(op->call_options, st_opt_topology)) { pcmk__reset_result(&op->result); pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_NO_FENCE_DEVICE, NULL); } /* ... else use existing result from previous failed attempt * (topology is not in use, and no devices remain to be attempted). * Overwriting the result with PCMK_EXEC_NO_FENCE_DEVICE would * prevent finalize_op() from setting the correct delegate if * needed. */ crm_info("No peers (out of %d) are capable of fencing (%s) %s " "for client %s " QB_XS " state=%s", op->replies, op->action, op->target, op->client_name, stonith__op_state_text(op->state)); } op->state = st_failed; finalize_op(op, NULL, false); } else { crm_info("Waiting for additional peers capable of fencing (%s) %s%s%s " "for client %s " QB_XS " id=%.8s", op->action, op->target, (device? " using " : ""), (device? device : ""), op->client_name, op->id); } } /*! * \internal * \brief Comparison function for sorting query results * * \param[in] a GList item to compare * \param[in] b GList item to compare * * \return Per the glib documentation, "a negative integer if the first value * comes before the second, 0 if they are equal, or a positive integer * if the first value comes after the second." */ static gint sort_peers(gconstpointer a, gconstpointer b) { const peer_device_info_t *peer_a = a; const peer_device_info_t *peer_b = b; return (peer_b->ndevices - peer_a->ndevices); } /*! 
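/* sort_peers() above is the GCompareFunc that add_result() later passes to
 * g_list_insert_sorted(), so that peers reporting more devices are tried
 * first. A self-contained sketch of that comparator-plus-sorted-insert pairing
 * is below; demo_query_peer_t is an illustrative stand-in for the real
 * peer_device_info_t.
 */
#include <glib.h>

typedef struct {
    char *host;
    int ndevices;
} demo_query_peer_t;

/* order descending by device count (more devices sorts earlier) */
static gint
demo_cmp_peers(gconstpointer a, gconstpointer b)
{
    const demo_query_peer_t *pa = a;
    const demo_query_peer_t *pb = b;

    return pb->ndevices - pa->ndevices;
}

/* usage: results = g_list_insert_sorted(results, new_peer, demo_cmp_peers); */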
* \internal * \brief Determine if all the devices in the topology are found or not * * \param[in] op Fencing operation with topology to check */ static gboolean all_topology_devices_found(const remote_fencing_op_t *op) { GList *device = NULL; GList *iter = NULL; device_properties_t *match = NULL; stonith_topology_t *tp = NULL; gboolean skip_target = FALSE; int i; tp = find_topology_for_host(op->target); if (!tp) { return FALSE; } if (pcmk__is_fencing_action(op->action)) { /* Don't count the devices on the target node if we are killing * the target node. */ skip_target = TRUE; } for (i = 0; i < ST__LEVEL_COUNT; i++) { for (device = tp->levels[i]; device; device = device->next) { match = NULL; for (iter = op->query_results; iter && !match; iter = iter->next) { peer_device_info_t *peer = iter->data; if (skip_target && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) { continue; } match = find_peer_device(op, peer, device->data, fenced_df_none); } if (!match) { return FALSE; } } } return TRUE; } /*! * \internal * \brief Parse action-specific device properties from XML * * \param[in] xml XML element containing the properties * \param[in] peer Name of peer that sent XML (for logs) * \param[in] device Device ID (for logs) * \param[in] action Action the properties relate to (for logs) * \param[in,out] op Fencing operation that properties are being parsed for * \param[in] phase Phase the properties relate to * \param[in,out] props Device properties to update */ static void parse_action_specific(const xmlNode *xml, const char *peer, const char *device, const char *action, remote_fencing_op_t *op, enum st_remap_phase phase, device_properties_t *props) { props->custom_action_timeout[phase] = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_ACTION_TIMEOUT, &props->custom_action_timeout[phase]); if (props->custom_action_timeout[phase]) { crm_trace("Peer %s with device %s returned %s action timeout %ds", peer, device, action, props->custom_action_timeout[phase]); } props->delay_max[phase] = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_DELAY_MAX, &props->delay_max[phase]); if (props->delay_max[phase]) { crm_trace("Peer %s with device %s returned maximum of random delay %ds for %s", peer, device, props->delay_max[phase], action); } props->delay_base[phase] = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_DELAY_BASE, &props->delay_base[phase]); if (props->delay_base[phase]) { crm_trace("Peer %s with device %s returned base delay %ds for %s", peer, device, props->delay_base[phase], action); } /* Handle devices with automatic unfencing */ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) { int required = 0; pcmk__xe_get_int(xml, PCMK__XA_ST_REQUIRED, &required); if (required) { crm_trace("Peer %s requires device %s to execute for action %s", peer, device, action); add_required_device(op, device); } } /* If a reboot is remapped to off+on, it's possible that a node is allowed * to perform one action but not another. */ if (pcmk__xe_attr_is_true(xml, PCMK__XA_ST_ACTION_DISALLOWED)) { props->disallowed[phase] = TRUE; crm_trace("Peer %s is disallowed from executing %s for device %s", peer, action, device); } } /*! 
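/* parse_action_specific() above reads several optional integer attributes,
 * leaving a default in place when an attribute is missing or unparsable. With
 * plain libxml2 that pattern looks roughly like the helper below; the helper
 * name is illustrative and this is not the pcmk__xe_get_int() implementation.
 */
#include <libxml/tree.h>
#include <libxml/xmlmemory.h>
#include <stdlib.h>

/* return the attribute parsed as int, or default_value if absent/not numeric */
static int
demo_get_int_attr(xmlNode *xml, const char *name, int default_value)
{
    int result = default_value;
    xmlChar *value = xmlGetProp(xml, (const xmlChar *) name);

    if (value != NULL) {
        char *end = NULL;
        long parsed = strtol((const char *) value, &end, 10);

        if ((end != NULL) && (end != (char *) value) && (*end == '\0')) {
            result = (int) parsed;
        }
        xmlFree(value);
    }
    return result;
}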
* \internal * \brief Parse one device's properties from peer's XML query reply * * \param[in] xml XML node containing device properties * \param[in,out] op Operation that query and reply relate to * \param[in,out] peer Peer's device information * \param[in] device ID of device being parsed */ static void add_device_properties(const xmlNode *xml, remote_fencing_op_t *op, peer_device_info_t *peer, const char *device) { xmlNode *child; int verified = 0; device_properties_t *props = pcmk__assert_alloc(1, sizeof(device_properties_t)); int rc = pcmk_rc_ok; /* Add a new entry to this peer's devices list */ g_hash_table_insert(peer->devices, pcmk__str_copy(device), props); /* Peers with verified (monitored) access will be preferred */ pcmk__xe_get_int(xml, PCMK__XA_ST_MONITOR_VERIFIED, &verified); if (verified) { crm_trace("Peer %s has confirmed a verified device %s", peer->host, device); props->verified = TRUE; } // Nodes <2.1.5 won't set this, so assume unfencing in that case rc = pcmk__xe_get_flags(xml, PCMK__XA_ST_DEVICE_SUPPORT_FLAGS, &(props->device_support_flags), fenced_df_supports_on); if (rc != pcmk_rc_ok) { crm_warn("Couldn't determine device support for %s " "(assuming unfencing): %s", device, pcmk_rc_str(rc)); } /* Parse action-specific device properties */ parse_action_specific(xml, peer->host, device, op_requested_action(op), op, st_phase_requested, props); for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { /* Replies for "reboot" operations will include the action-specific * values for "off" and "on" in child elements, just in case the reboot * winds up getting remapped. */ if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_OFF, pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF, op, st_phase_off, props); } else if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_ON, pcmk__str_none)) { parse_action_specific(child, peer->host, device, PCMK_ACTION_ON, op, st_phase_on, props); } } } /*! * \internal * \brief Parse a peer's XML query reply and add it to operation's results * * \param[in,out] op Operation that query and reply relate to * \param[in] host Name of peer that sent this reply * \param[in] ndevices Number of devices expected in reply * \param[in] xml XML node containing device list * * \return Newly allocated result structure with parsed reply */ static peer_device_info_t * add_result(remote_fencing_op_t *op, const char *host, int ndevices, const xmlNode *xml) { peer_device_info_t *peer = pcmk__assert_alloc(1, sizeof(peer_device_info_t)); xmlNode *child; peer->host = pcmk__str_copy(host); peer->devices = pcmk__strkey_table(free, free); /* Each child element describes one capable device available to the peer */ for (child = pcmk__xe_first_child(xml, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { const char *device = pcmk__xe_id(child); if (device) { add_device_properties(child, op, peer, device); } } peer->ndevices = g_hash_table_size(peer->devices); CRM_CHECK(ndevices == peer->ndevices, crm_err("Query claimed to have %d device%s but %d found", ndevices, pcmk__plural_s(ndevices), peer->ndevices)); op->query_results = g_list_insert_sorted(op->query_results, peer, sort_peers); return peer; } /*! * \internal * \brief Handle a peer's reply to our fencing query * * Parse a query result from XML and store it in the remote operation * table, and when enough replies have been received, issue a fencing request. 
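/* add_result() above gives each peer a string-keyed device table whose keys
 * and values are freed automatically when entries go away (the
 * pcmk__strkey_table(free, free) call). The plain GLib equivalent of that
 * setup is sketched below; demo_device_props_t is an illustrative stand-in
 * for device_properties_t.
 */
#include <glib.h>

typedef struct {
    gboolean verified;
    int action_timeout_s;
} demo_device_props_t;

static GHashTable *
demo_new_device_table(void)
{
    /* duplicated device names as keys, heap-allocated property structs as
     * values; both are released when an entry is removed or the table freed */
    return g_hash_table_new_full(g_str_hash, g_str_equal, g_free, g_free);
}

static void
demo_add_device(GHashTable *devices, const char *id, gboolean verified)
{
    demo_device_props_t *props = g_new0(demo_device_props_t, 1);

    props->verified = verified;
    g_hash_table_insert(devices, g_strdup(id), props);
}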
* * \param[in] msg XML reply received * * \return pcmk_ok on success, -errno on error * * \note See initiate_remote_stonith_op() for how the XML query was initially * formed, and stonith_query() for how the peer formed its XML reply. */ int process_remote_stonith_query(xmlNode *msg) { int ndevices = 0; gboolean host_is_target = FALSE; gboolean have_all_replies = FALSE; const char *id = NULL; const char *host = NULL; remote_fencing_op_t *op = NULL; peer_device_info_t *peer = NULL; uint32_t replies_expected; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_REMOTE_OP "]", LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); id = pcmk__xe_get(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return -EPROTO); dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_AVAILABLE_DEVICES "]", LOG_ERR); CRM_CHECK(dev != NULL, return -EPROTO); pcmk__xe_get_int(dev, PCMK__XA_ST_AVAILABLE_DEVICES, &ndevices); op = g_hash_table_lookup(stonith_remote_op_list, id); if (op == NULL) { crm_debug("Received query reply for unknown or expired operation %s", id); return -EOPNOTSUPP; } replies_expected = fencing_active_peers(); if (op->replies_expected < replies_expected) { replies_expected = op->replies_expected; } if ((++op->replies >= replies_expected) && (op->state == st_query)) { have_all_replies = TRUE; } host = pcmk__xe_get(msg, PCMK__XA_SRC); host_is_target = pcmk__str_eq(host, op->target, pcmk__str_casei); crm_info("Query result %d of %d from %s for %s/%s (%d device%s) %s", op->replies, replies_expected, host, op->target, op->action, ndevices, pcmk__plural_s(ndevices), id); if (ndevices > 0) { peer = add_result(op, host, ndevices, dev); } pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); if (pcmk__is_set(op->call_options, st_opt_topology)) { /* If we start the fencing before all the topology results are in, * it is possible fencing levels will be skipped because of the missing * query results. */ if (op->state == st_query && all_topology_devices_found(op)) { /* All the query results are in for the topology, start the fencing ops. */ crm_trace("All topology devices found"); request_peer_fencing(op, peer); } else if (have_all_replies) { crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ", replies_expected, op->replies); request_peer_fencing(op, NULL); } } else if (op->state == st_query) { int nverified = count_peer_devices(op, peer, TRUE, fenced_support_flag(op->action)); /* We have a result for a non-topology fencing op that looks promising, * go ahead and start fencing before query timeout */ if ((peer != NULL) && !host_is_target && nverified) { /* we have a verified device living on a peer that is not the target */ crm_trace("Found %d verified device%s", nverified, pcmk__plural_s(nverified)); request_peer_fencing(op, peer); } else if (have_all_replies) { crm_info("All query replies have arrived, continuing (%d expected/%d received) ", replies_expected, op->replies); request_peer_fencing(op, NULL); } else { crm_trace("Waiting for more peer results before launching fencing operation"); } } else if ((peer != NULL) && (op->state == st_done)) { crm_info("Discarding query result from %s (%d device%s): " "Operation is %s", peer->host, peer->ndevices, pcmk__plural_s(peer->ndevices), stonith__op_state_text(op->state)); } return pcmk_ok; } /*! * \internal * \brief Handle a peer's reply to a fencing request * * Parse a fencing reply from XML, and either finalize the operation * or attempt another device as appropriate. 
* * \param[in] msg XML reply received */ void fenced_process_fencing_reply(xmlNode *msg) { const char *id = NULL; const char *device = NULL; remote_fencing_op_t *op = NULL; xmlNode *dev = pcmk__xpath_find_one(msg->doc, "//*[@" PCMK__XA_ST_REMOTE_OP "]", LOG_ERR); pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(dev != NULL, return); id = pcmk__xe_get(dev, PCMK__XA_ST_REMOTE_OP); CRM_CHECK(id != NULL, return); dev = stonith__find_xe_with_result(msg); CRM_CHECK(dev != NULL, return); stonith__xe_get_result(dev, &result); device = pcmk__xe_get(dev, PCMK__XA_ST_DEVICE_ID); if (stonith_remote_op_list) { op = g_hash_table_lookup(stonith_remote_op_list, id); } if ((op == NULL) && pcmk__result_ok(&result)) { /* Record successful fencing operations */ const char *client_id = pcmk__xe_get(dev, PCMK__XA_ST_CLIENTID); op = create_remote_stonith_op(client_id, dev, TRUE); } if (op == NULL) { /* Could be for an event that began before we started */ /* TODO: Record the op for later querying */ crm_info("Received peer result of unknown or expired operation %s", id); pcmk__reset_result(&result); return; } pcmk__reset_result(&op->result); op->result = result; // The operation takes ownership of the result if (op->devices && device && !pcmk__str_eq(op->devices->data, device, pcmk__str_casei)) { crm_err("Received outdated reply for device %s (instead of %s) to " "fence (%s) %s. Operation already timed out at peer level.", device, (const char *) op->devices->data, op->action, op->target); return; } if (pcmk__str_eq(pcmk__xe_get(msg, PCMK__XA_SUBT), PCMK__VALUE_BROADCAST, pcmk__str_none)) { if (pcmk__result_ok(&op->result)) { op->state = st_done; } else { op->state = st_failed; } finalize_op(op, msg, false); return; } else if (!pcmk__str_eq(op->originator, fenced_get_local_node(), pcmk__str_casei)) { /* If this isn't a remote level broadcast, and we are not the * originator of the operation, we should not be receiving this msg. */ crm_err("Received non-broadcast fencing result for operation %.8s " "we do not own (device %s targeting %s)", op->id, device, op->target); return; } if (pcmk__is_set(op->call_options, st_opt_topology)) { const char *device = NULL; const char *reason = op->result.exit_reason; /* We own the op, and it is complete. broadcast the result to all nodes * and notify our local clients. */ if (op->state == st_done) { finalize_op(op, msg, false); return; } device = pcmk__xe_get(msg, PCMK__XA_ST_DEVICE_ID); if ((op->phase == 2) && !pcmk__result_ok(&op->result)) { /* A remapped "on" failed, but the node was already turned off * successfully, so ignore the error and continue. */ crm_warn("Ignoring %s 'on' failure (%s%s%s) targeting %s " "after successful 'off'", device, pcmk_exec_status_str(op->result.execution_status), (reason == NULL)? "" : ": ", (reason == NULL)? "" : reason, op->target); pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL); } else { crm_notice("Action '%s' targeting %s%s%s on behalf of %s@%s: " "%s%s%s%s", op->action, op->target, ((device == NULL)? "" : " using "), ((device == NULL)? "" : device), op->client_name, op->originator, pcmk_exec_status_str(op->result.execution_status), (reason == NULL)? "" : " (", (reason == NULL)? "" : reason, (reason == NULL)? "" : ")"); } if (pcmk__result_ok(&op->result)) { /* An operation completed successfully. Try another device if * necessary, otherwise mark the operation as done. */ advance_topology_device_in_level(op, device, msg); return; } else { /* This device failed, time to try another topology level. 
If no other * levels are available, mark this operation as failed and report results. */ if (advance_topology_level(op, false) != pcmk_rc_ok) { op->state = st_failed; finalize_op(op, msg, false); return; } } } else if (pcmk__result_ok(&op->result) && (op->devices == NULL)) { op->state = st_done; finalize_op(op, msg, false); return; } else if ((op->result.execution_status == PCMK_EXEC_TIMEOUT) && (op->devices == NULL)) { /* If the operation timed out don't bother retrying other peers. */ op->state = st_failed; finalize_op(op, msg, false); return; } else { /* fall-through and attempt other fencing action using another peer */ } /* Retry on failure */ crm_trace("Next for %s on behalf of %s@%s (result was: %s)", op->target, op->originator, op->client_name, pcmk_exec_status_str(op->result.execution_status)); request_peer_fencing(op, NULL); } gboolean stonith_check_fence_tolerance(int tolerance, const char *target, const char *action) { GHashTableIter iter; time_t now = time(NULL); remote_fencing_op_t *rop = NULL; if (tolerance <= 0 || !stonith_remote_op_list || target == NULL || action == NULL) { return FALSE; } g_hash_table_iter_init(&iter, stonith_remote_op_list); while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) { if (strcmp(rop->target, target) != 0) { continue; } else if (rop->state != st_done) { continue; /* We don't have to worry about remapped reboots here * because if state is done, any remapping has been undone */ } else if (strcmp(rop->action, action) != 0) { continue; } else if ((rop->completed + tolerance) < now) { continue; } crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s", target, action, tolerance, rop->delegate, rop->originator); return TRUE; } return FALSE; } diff --git a/include/crm/common/xml_element_internal.h b/include/crm/common/xml_element_internal.h index 0999d0267a..67710d404c 100644 --- a/include/crm/common/xml_element_internal.h +++ b/include/crm/common/xml_element_internal.h @@ -1,208 +1,208 @@ /* * Copyright 2017-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #ifndef PCMK__CRM_COMMON_XML_ELEMENT_INTERNAL__H #define PCMK__CRM_COMMON_XML_ELEMENT_INTERNAL__H /* * Internal-only wrappers for and extensions to libxml2 for processing XML * elements */ #include // bool #include // uint32_t #include // NULL #include // strcmp() #include // guint #include // xmlNode, etc. #include // crm_time_t #include // pcmk__str_copy() #include // PCMK_XA_ID #ifdef __cplusplus extern "C" { #endif const char *pcmk__xe_add_last_written(xmlNode *xe); xmlNode *pcmk__xe_first_child(const xmlNode *parent, const char *node_name, const char *attr_n, const char *attr_v); void pcmk__xe_remove_attr(xmlNode *element, const char *name); bool pcmk__xe_remove_attr_cb(xmlNode *xml, void *user_data); void pcmk__xe_remove_matching_attrs(xmlNode *element, bool force, bool (*match)(xmlAttrPtr, void *), void *user_data); int pcmk__xe_delete_match(xmlNode *xml, xmlNode *search); int pcmk__xe_replace_match(xmlNode *xml, xmlNode *replace); int pcmk__xe_update_match(xmlNode *xml, xmlNode *update, uint32_t flags); /*! 
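/* The tolerance check in stonith_check_fence_tolerance() above treats a
 * recent successful fencing of the same target as good enough if it completed
 * within the tolerance window. Reduced to its core time comparison, with an
 * illustrative helper name:
 */
#include <stdbool.h>
#include <time.h>

/* true if an action that completed at 'completed' finished within the last
 * tolerance_s seconds */
static bool
demo_within_tolerance(time_t completed, int tolerance_s)
{
    return (tolerance_s > 0) && ((completed + tolerance_s) >= time(NULL));
}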
* \internal * \brief Check whether an XML element is of a particular type * * \param[in] xml XML element to compare * \param[in] name XML element name to compare * * \return \c true if \p xml is of type \p name, otherwise \c false */ static inline bool pcmk__xe_is(const xmlNode *xml, const char *name) { return (xml != NULL) && (xml->name != NULL) && (name != NULL) && (strcmp((const char *) xml->name, name) == 0); } xmlNode *pcmk__xe_create(xmlNode *parent, const char *name); xmlNode *pcmk__xe_next(const xmlNode *node, const char *element_name); void pcmk__xe_set_content(xmlNode *node, const char *format, ...) G_GNUC_PRINTF(2, 3); int pcmk__xe_get_score(const xmlNode *xml, const char *name, int *score, int default_score); int pcmk__xe_copy_attrs(xmlNode *target, const xmlNode *src, uint32_t flags); void pcmk__xe_sort_attrs(xmlNode *xml); void pcmk__xe_set_id(xmlNode *xml, const char *format, ...) G_GNUC_PRINTF(2, 3); /*! * \internal * \brief Like pcmk__xe_set_props, but takes a va_list instead of * arguments directly. * * \param[in,out] node XML to add attributes to * \param[in] pairs NULL-terminated list of name/value pairs to add */ void pcmk__xe_set_propv(xmlNodePtr node, va_list pairs); /*! * \internal * \brief Add a NULL-terminated list of name/value pairs to the given * XML node as properties. * * \param[in,out] node XML node to add properties to * \param[in] ... NULL-terminated list of name/value pairs * * \note A NULL name terminates the arguments; a NULL value will be skipped. */ void pcmk__xe_set_props(xmlNodePtr node, ...) G_GNUC_NULL_TERMINATED; /*! * \internal * \brief Get first attribute of an XML element * * \param[in] xe XML element to check * * \return First attribute of \p xe (or NULL if \p xe is NULL or has none) */ static inline xmlAttr * pcmk__xe_first_attr(const xmlNode *xe) { return (xe == NULL)? NULL : xe->properties; } /*! * \internal * \brief Iterate over child elements of \p xml * * This function iterates over the children of \p xml, performing the * callback function \p handler on each node. If the callback returns * a value other than pcmk_rc_ok, the iteration stops and the value is * returned. It is therefore possible that not all children will be * visited. * * \param[in,out] xml The starting XML node. Can be NULL. * \param[in] child_element_name The name that the node must match in order * for \p handler to be run. If NULL, all * child elements will match. * \param[in] handler The callback function. * \param[in,out] userdata User data to pass to the callback function. * Can be NULL. 
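/* pcmk__xe_foreach_child(), whose behavior is documented above, stops as soon
 * as the handler returns something other than pcmk_rc_ok. A rough stand-alone
 * equivalent in plain libxml2 is sketched below; it is not the actual
 * implementation, and 0 stands in for pcmk_rc_ok.
 */
#include <libxml/tree.h>
#include <string.h>

static int
demo_foreach_child(xmlNode *parent, const char *name,
                   int (*handler)(xmlNode *child, void *userdata),
                   void *userdata)
{
    xmlNode *child = (parent != NULL)? parent->children : NULL;

    for (; child != NULL; child = child->next) {
        if (child->type != XML_ELEMENT_NODE) {
            continue;                       /* skip text/comment nodes */
        }
        if ((name != NULL)
            && (strcmp((const char *) child->name, name) != 0)) {
            continue;                       /* not the requested element name */
        }
        int rc = handler(child, userdata);

        if (rc != 0) {
            return rc;                      /* stop early, propagate the code */
        }
    }
    return 0;
}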
* * \return Standard Pacemaker return code */ int pcmk__xe_foreach_child(xmlNode *xml, const char *child_element_name, int (*handler)(xmlNode *xml, void *userdata), void *userdata); const char *pcmk__xe_get(const xmlNode *xml, const char *attr_name); int pcmk__xe_set(xmlNode *xml, const char *attr_name, const char *value); int pcmk__xe_get_datetime(const xmlNode *xml, const char *attr, crm_time_t **t); int pcmk__xe_get_flags(const xmlNode *xml, const char *name, uint32_t *dest, uint32_t default_value); int pcmk__xe_get_guint(const xmlNode *xml, const char *attr, guint *dest); void pcmk__xe_set_guint(xmlNode *xml, const char *attr, guint value); int pcmk__xe_get_int(const xmlNode *xml, const char *name, int *dest); void pcmk__xe_set_int(xmlNode *xml, const char *attr, int value); int pcmk__xe_get_ll(const xmlNode *xml, const char *name, long long *dest); int pcmk__xe_set_ll(xmlNode *xml, const char *attr, long long value); int pcmk__xe_get_time(const xmlNode *xml, const char *attr, time_t *dest); void pcmk__xe_set_time(xmlNode *xml, const char *attr, time_t value); int pcmk__xe_get_timeval(const xmlNode *xml, const char *sec_attr, const char *usec_attr, struct timeval *dest); void pcmk__xe_set_timeval(xmlNode *xml, const char *sec_attr, const char *usec_attr, const struct timeval *value); int pcmk__xe_get_bool(const xmlNode *xml, const char *attr, bool *dest); -void pcmk__xe_set_bool_attr(xmlNodePtr node, const char *name, bool value); +void pcmk__xe_set_bool(xmlNode *xml, const char *attr, bool value); bool pcmk__xe_attr_is_true(const xmlNode *node, const char *name); /*! * \internal * \brief Retrieve a copy of the value of an XML attribute * * This is like \c pcmk__xe_get() but allocates new memory for the result. * * \param[in] xml XML element whose attribute to get * \param[in] attr Attribute name * * \return Value of specified attribute (or \c NULL if not set) * * \note The caller is responsible for freeing the result using \c free(). */ static inline char * pcmk__xe_get_copy(const xmlNode *xml, const char *attr) { return pcmk__str_copy(pcmk__xe_get(xml, attr)); } /*! * \internal * \brief Retrieve the value of the \c PCMK_XA_ID XML attribute * * \param[in] xml XML element to check * * \return Value of the \c PCMK_XA_ID attribute (may be \c NULL) */ static inline const char * pcmk__xe_id(const xmlNode *xml) { return pcmk__xe_get(xml, PCMK_XA_ID); } #ifdef __cplusplus } #endif #endif // PCMK__CRM_COMMON_XML_ELEMENT_INTERNAL__H diff --git a/lib/common/ipc_attrd.c b/lib/common/ipc_attrd.c index f9ab570008..f11e3727a1 100644 --- a/lib/common/ipc_attrd.c +++ b/lib/common/ipc_attrd.c @@ -1,503 +1,503 @@ /* * Copyright 2011-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
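
Illustrative sketch (not part of this patch): how the renamed pcmk__xe_set_bool() fits together with pcmk__xe_get_bool(), pcmk__xe_attr_is_true(), and pcmk__xe_foreach_child() from the header above. The include paths are assumptions, and these are internal APIs, so out-of-tree code should not depend on them.

#include <stdbool.h>
#include <stdio.h>
#include <crm/common/results.h>                // pcmk_rc_ok (assumed path)
#include <crm/common/xml_internal.h>           // assumed home of pcmk__xml_free()
#include <crm/common/xml_element_internal.h>   // internal header from this patch

static int
print_child(xmlNode *child, void *userdata)
{
    printf("child <%s> enabled=%s\n", (const char *) child->name,
           pcmk__xe_attr_is_true(child, "enabled") ? "true" : "false");
    return pcmk_rc_ok;   // returning anything else stops the iteration
}

static void
bool_attr_demo(void)
{
    bool value = false;
    xmlNode *parent = pcmk__xe_create(NULL, "demo");
    xmlNode *child = pcmk__xe_create(parent, "op");

    pcmk__xe_set_bool(child, "enabled", true);    // stores PCMK_VALUE_TRUE

    if (pcmk__xe_get_bool(child, "enabled", &value) == pcmk_rc_ok) {
        printf("parsed back as %s\n", value ? "true" : "false");
    }
    pcmk__xe_foreach_child(parent, "op", print_child, NULL);
    pcmk__xml_free(parent);
}
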
*/ #include #include #include // xmlChar #include #include #include #include #include #include "crmcommon_private.h" static void set_pairs_data(pcmk__attrd_api_reply_t *data, xmlNode *msg_data) { const char *name = NULL; pcmk__attrd_query_pair_t *pair; name = pcmk__xe_get(msg_data, PCMK__XA_ATTR_NAME); for (xmlNode *node = pcmk__xe_first_child(msg_data, PCMK_XE_NODE, NULL, NULL); node != NULL; node = pcmk__xe_next(node, PCMK_XE_NODE)) { pair = pcmk__assert_alloc(1, sizeof(pcmk__attrd_query_pair_t)); pair->node = pcmk__xe_get(node, PCMK__XA_ATTR_HOST); pair->name = name; pair->value = pcmk__xe_get(node, PCMK__XA_ATTR_VALUE); data->data.pairs = g_list_prepend(data->data.pairs, pair); } } static bool reply_expected(pcmk_ipc_api_t *api, const xmlNode *request) { const char *command = pcmk__xe_get(request, PCMK_XA_TASK); return pcmk__str_any_of(command, PCMK__ATTRD_CMD_CLEAR_FAILURE, PCMK__ATTRD_CMD_QUERY, PCMK__ATTRD_CMD_REFRESH, PCMK__ATTRD_CMD_UPDATE, PCMK__ATTRD_CMD_UPDATE_BOTH, PCMK__ATTRD_CMD_UPDATE_DELAY, NULL); } static bool dispatch(pcmk_ipc_api_t *api, xmlNode *reply) { const char *value = NULL; crm_exit_t status = CRM_EX_OK; pcmk__attrd_api_reply_t reply_data = { pcmk__attrd_reply_unknown }; if (pcmk__xe_is(reply, PCMK__XE_ACK)) { return false; } /* Do some basic validation of the reply */ value = pcmk__xe_get(reply, PCMK__XA_T); if (pcmk__str_empty(value) || !pcmk__str_eq(value, PCMK__VALUE_ATTRD, pcmk__str_none)) { crm_info("Unrecognizable message from attribute manager: " "message type '%s' not '" PCMK__VALUE_ATTRD "'", pcmk__s(value, "")); status = CRM_EX_PROTOCOL; goto done; } value = pcmk__xe_get(reply, PCMK__XA_SUBT); /* Only the query command gets a reply for now. NULL counts as query for * backward compatibility with attribute managers <2.1.3 that didn't set it. */ if (pcmk__str_eq(value, PCMK__ATTRD_CMD_QUERY, pcmk__str_null_matches)) { if (!xmlHasProp(reply, (const xmlChar *) PCMK__XA_ATTR_NAME)) { status = ENXIO; // Most likely, the attribute doesn't exist goto done; } reply_data.reply_type = pcmk__attrd_reply_query; set_pairs_data(&reply_data, reply); } else { crm_info("Unrecognizable message from attribute manager: " "message subtype '%s' unknown", pcmk__s(value, "")); status = CRM_EX_PROTOCOL; goto done; } done: pcmk__call_ipc_callback(api, pcmk_ipc_event_reply, status, &reply_data); /* Free any reply data that was allocated */ if (reply_data.data.pairs) { g_list_free_full(reply_data.data.pairs, free); } return false; } pcmk__ipc_methods_t * pcmk__attrd_api_methods(void) { pcmk__ipc_methods_t *cmds = calloc(1, sizeof(pcmk__ipc_methods_t)); if (cmds != NULL) { cmds->new_data = NULL; cmds->free_data = NULL; cmds->post_connect = NULL; cmds->reply_expected = reply_expected; cmds->dispatch = dispatch; } return cmds; } /*! 
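
Illustrative sketch (not part of this patch): each daemon-specific IPC API hands libcrmcommon a small table of callbacks (new_data, free_data, post_connect, reply_expected, dispatch), as pcmk__attrd_api_methods() does above. The standalone C below reproduces that shape with hypothetical types of its own, purely to show the pattern; it is not the real pcmk__ipc_methods_t definition.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the real IPC API types. */
typedef struct demo_api demo_api_t;

typedef struct {
    int  (*post_connect)(demo_api_t *api);
    bool (*reply_expected)(demo_api_t *api, const char *request);
    bool (*dispatch)(demo_api_t *api, const char *reply);
} demo_methods_t;

struct demo_api {
    demo_methods_t *cmds;
};

static bool
demo_reply_expected(demo_api_t *api, const char *request)
{
    return request != NULL;            // e.g., only queries get replies
}

static bool
demo_dispatch(demo_api_t *api, const char *reply)
{
    printf("got reply: %s\n", reply);
    return false;                      // false: no more messages expected
}

/* Build the callback table, leaving unused hooks NULL, as the attrd
 * methods constructor above does. */
static demo_methods_t *
demo_api_methods(void)
{
    demo_methods_t *cmds = calloc(1, sizeof(demo_methods_t));

    if (cmds != NULL) {
        cmds->post_connect = NULL;
        cmds->reply_expected = demo_reply_expected;
        cmds->dispatch = demo_dispatch;
    }
    return cmds;
}
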
* \internal * \brief Create a generic attribute manager operation * * \param[in] user_name If not NULL, ACL user to set for operation * * \return XML of attribute manager operation */ static xmlNode * create_attrd_op(const char *user_name) { xmlNode *attrd_op = pcmk__xe_create(NULL, __func__); pcmk__xe_set(attrd_op, PCMK__XA_T, PCMK__VALUE_ATTRD); pcmk__xe_set(attrd_op, PCMK__XA_SRC, pcmk__s(crm_system_name, "unknown")); pcmk__xe_set(attrd_op, PCMK__XA_ATTR_USER, user_name); return attrd_op; } static int connect_and_send_attrd_request(pcmk_ipc_api_t *api, const xmlNode *request) { static const int max_retries = 5; int remaining_attempts = max_retries; int rc = pcmk_rc_ok; bool created_api = false; enum pcmk_ipc_dispatch dispatch = pcmk_ipc_dispatch_sync; if (api == NULL) { rc = pcmk_new_ipc_api(&api, pcmk_ipc_attrd); if (rc != pcmk_rc_ok) { return rc; } created_api = true; } else { dispatch = api->dispatch_type; } // If attrd is killed and is being restarted we will temporarily get // ECONNREFUSED on connect if it is already dead or ENOTCONN if it died // after we connected to it. We should wait a bit and retry in those cases. do { if (rc == ENOTCONN || rc == ECONNREFUSED) { sleep(max_retries - remaining_attempts); } rc = pcmk__connect_ipc(api, dispatch, remaining_attempts); if (rc == pcmk_rc_ok) { rc = pcmk__send_ipc_request(api, request); } remaining_attempts--; } while ((rc == ENOTCONN || rc == ECONNREFUSED) && remaining_attempts >= 0); if (created_api) { pcmk_free_ipc_api(api); } return rc; } int pcmk__attrd_api_clear_failures(pcmk_ipc_api_t *api, const char *node, const char *resource, const char *operation, const char *interval_spec, const char *user_name, uint32_t options) { int rc = pcmk_rc_ok; xmlNode *request = create_attrd_op(user_name); const char *interval_desc = NULL; const char *op_desc = NULL; const char *target = pcmk__node_attr_target(node); if (target != NULL) { node = target; } if (operation) { interval_desc = pcmk__s(interval_spec, "nonrecurring"); op_desc = operation; } else { interval_desc = "all"; op_desc = "operations"; } crm_debug("Asking %s to clear failure of %s %s for %s on %s", pcmk_ipc_name(api, true), interval_desc, op_desc, pcmk__s(resource, "all resources"), pcmk__s(node, "all nodes")); pcmk__xe_set(request, PCMK_XA_TASK, PCMK__ATTRD_CMD_CLEAR_FAILURE); pcmk__xe_set(request, PCMK__XA_ATTR_HOST, node); pcmk__xe_set(request, PCMK__XA_ATTR_RESOURCE, resource); pcmk__xe_set(request, PCMK__XA_ATTR_CLEAR_OPERATION, operation); pcmk__xe_set(request, PCMK__XA_ATTR_CLEAR_INTERVAL, interval_spec); pcmk__xe_set_int(request, PCMK__XA_ATTR_IS_REMOTE, pcmk__is_set(options, pcmk__node_attr_remote)); rc = connect_and_send_attrd_request(api, request); pcmk__xml_free(request); return rc; } int pcmk__attrd_api_delete(pcmk_ipc_api_t *api, const char *node, const char *name, uint32_t options) { const char *target = NULL; if (name == NULL) { return EINVAL; } target = pcmk__node_attr_target(node); if (target != NULL) { node = target; } /* Make sure the right update option is set. */ options &= ~pcmk__node_attr_delay; options |= pcmk__node_attr_value; return pcmk__attrd_api_update(api, node, name, NULL, NULL, NULL, NULL, options); } int pcmk__attrd_api_purge(pcmk_ipc_api_t *api, const char *node, bool reap) { int rc = pcmk_rc_ok; xmlNode *request = NULL; const char *target = pcmk__node_attr_target(node); if (target != NULL) { node = target; } crm_debug("Asking %s to purge transient attributes%s for %s", pcmk_ipc_name(api, true), (reap? 
" and node cache entries" : ""), pcmk__s(node, "local node")); request = create_attrd_op(NULL); pcmk__xe_set(request, PCMK_XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE); - pcmk__xe_set_bool_attr(request, PCMK__XA_REAP, reap); + pcmk__xe_set_bool(request, PCMK__XA_REAP, reap); pcmk__xe_set(request, PCMK__XA_ATTR_HOST, node); rc = connect_and_send_attrd_request(api, request); pcmk__xml_free(request); return rc; } int pcmk__attrd_api_query(pcmk_ipc_api_t *api, const char *node, const char *name, uint32_t options) { int rc = pcmk_rc_ok; xmlNode *request = NULL; const char *target = NULL; if (name == NULL) { return EINVAL; } if (pcmk__is_set(options, pcmk__node_attr_query_all)) { node = NULL; } else { target = pcmk__node_attr_target(node); if (target != NULL) { node = target; } else if (node == NULL) { node = "localhost"; } } crm_debug("Querying %s for value of '%s'%s%s", pcmk_ipc_name(api, true), name, ((node == NULL)? "" : " on "), pcmk__s(node, "")); request = create_attrd_op(NULL); pcmk__xe_set(request, PCMK__XA_ATTR_NAME, name); pcmk__xe_set(request, PCMK_XA_TASK, PCMK__ATTRD_CMD_QUERY); pcmk__xe_set(request, PCMK__XA_ATTR_HOST, node); rc = connect_and_send_attrd_request(api, request); pcmk__xml_free(request); return rc; } int pcmk__attrd_api_refresh(pcmk_ipc_api_t *api, const char *node) { int rc = pcmk_rc_ok; xmlNode *request = NULL; const char *target = pcmk__node_attr_target(node); if (target != NULL) { node = target; } crm_debug("Asking %s to write all transient attributes for %s to CIB", pcmk_ipc_name(api, true), pcmk__s(node, "local node")); request = create_attrd_op(NULL); pcmk__xe_set(request, PCMK_XA_TASK, PCMK__ATTRD_CMD_REFRESH); pcmk__xe_set(request, PCMK__XA_ATTR_HOST, node); rc = connect_and_send_attrd_request(api, request); pcmk__xml_free(request); return rc; } static void add_op_attr(xmlNode *op, uint32_t options) { if (pcmk__all_flags_set(options, pcmk__node_attr_value|pcmk__node_attr_delay)) { pcmk__xe_set(op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE_BOTH); } else if (pcmk__is_set(options, pcmk__node_attr_value)) { pcmk__xe_set(op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE); } else if (pcmk__is_set(options, pcmk__node_attr_delay)) { pcmk__xe_set(op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE_DELAY); } } static void populate_update_op(xmlNode *op, const char *node, const char *name, const char *value, const char *dampen, const char *set, uint32_t options) { if (pcmk__is_set(options, pcmk__node_attr_pattern)) { pcmk__xe_set(op, PCMK__XA_ATTR_REGEX, name); } else { pcmk__xe_set(op, PCMK__XA_ATTR_NAME, name); } if (pcmk__is_set(options, pcmk__node_attr_utilization)) { pcmk__xe_set(op, PCMK__XA_ATTR_SET_TYPE, PCMK_XE_UTILIZATION); } else { pcmk__xe_set(op, PCMK__XA_ATTR_SET_TYPE, PCMK_XE_INSTANCE_ATTRIBUTES); } add_op_attr(op, options); pcmk__xe_set(op, PCMK__XA_ATTR_VALUE, value); pcmk__xe_set(op, PCMK__XA_ATTR_DAMPENING, dampen); pcmk__xe_set(op, PCMK__XA_ATTR_HOST, node); pcmk__xe_set(op, PCMK__XA_ATTR_SET, set); pcmk__xe_set_int(op, PCMK__XA_ATTR_IS_REMOTE, pcmk__is_set(options, pcmk__node_attr_remote)); pcmk__xe_set_int(op, PCMK__XA_ATTR_IS_PRIVATE, pcmk__is_set(options, pcmk__node_attr_private)); if (pcmk__is_set(options, pcmk__node_attr_sync_local)) { pcmk__xe_set(op, PCMK__XA_ATTR_SYNC_POINT, PCMK__VALUE_LOCAL); } else if (pcmk__is_set(options, pcmk__node_attr_sync_cluster)) { pcmk__xe_set(op, PCMK__XA_ATTR_SYNC_POINT, PCMK__VALUE_CLUSTER); } } int pcmk__attrd_api_update(pcmk_ipc_api_t *api, const char *node, const char *name, const char *value, const char *dampen, const char *set, const 
char *user_name, uint32_t options) { int rc = pcmk_rc_ok; xmlNode *request = NULL; const char *target = NULL; if (name == NULL) { return EINVAL; } target = pcmk__node_attr_target(node); if (target != NULL) { node = target; } crm_debug("Asking %s to update '%s' to '%s' for %s", pcmk_ipc_name(api, true), name, pcmk__s(value, "(null)"), pcmk__s(node, "local node")); request = create_attrd_op(user_name); populate_update_op(request, node, name, value, dampen, set, options); rc = connect_and_send_attrd_request(api, request); pcmk__xml_free(request); return rc; } int pcmk__attrd_api_update_list(pcmk_ipc_api_t *api, GList *attrs, const char *dampen, const char *set, const char *user_name, uint32_t options) { int rc = pcmk_rc_ok; xmlNode *request = NULL; if (attrs == NULL) { return EINVAL; } /* There are two different ways of handling a list of attributes: * * (1) For messages originating from some command line tool, we have to send * them one at a time. In this loop, we just call pcmk__attrd_api_update * for each, letting it deal with creating the API object if it doesn't * already exist. * * The reason we can't use a single message in this case is that we can't * trust that the server supports it. Remote nodes could be involved * here, and there's no guarantee that a newer client running on a remote * node is talking to (or proxied through) a cluster node with a newer * attrd. We also can't just try sending a single message and then falling * back on multiple. There's no handshake with the attrd server to * determine its version. And then we would need to do that fallback in the * dispatch function for this to work for all connection types (mainloop in * particular), and at that point we won't know what the original message * was in order to break it apart and resend as individual messages. * * (2) For messages between daemons, we can be assured that the local attrd * will support the new message and that it can send to the other attrds * as one request or split up according to the minimum supported version. */ for (GList *iter = attrs; iter != NULL; iter = iter->next) { pcmk__attrd_query_pair_t *pair = (pcmk__attrd_query_pair_t *) iter->data; if (pcmk__is_daemon) { const char *target = NULL; xmlNode *child = NULL; /* First time through this loop - create the basic request. */ if (request == NULL) { request = create_attrd_op(user_name); add_op_attr(request, options); } /* Add a child node for this operation. We add the task to the top * level XML node so attrd_ipc_dispatch doesn't need changes. And * then we also add the task to each child node in populate_update_op * so attrd_client_update knows what form of update is taking place. */ child = pcmk__xe_create(request, PCMK_XE_OP); target = pcmk__node_attr_target(pair->node); if (target != NULL) { pair->node = target; } populate_update_op(child, pair->node, pair->name, pair->value, dampen, set, options); } else { rc = pcmk__attrd_api_update(api, pair->node, pair->name, pair->value, dampen, set, user_name, options); } } /* If we were doing multiple attributes at once, we still need to send the * request. Do that now, creating and destroying the API object if needed. 
*/ if (pcmk__is_daemon) { rc = connect_and_send_attrd_request(api, request); pcmk__xml_free(request); } return rc; } diff --git a/lib/common/ipc_client.c b/lib/common/ipc_client.c index 83030523a7..49147fa278 100644 --- a/lib/common/ipc_client.c +++ b/lib/common/ipc_client.c @@ -1,1791 +1,1791 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #if defined(HAVE_UCRED) || defined(HAVE_SOCKPEERCRED) #include #elif defined(HAVE_GETPEERUCRED) #include #endif #include #include #include #include #include /* indirectly: pcmk_err_generic */ #include #include #include #include "crmcommon_private.h" static int is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock, uid_t refuid, gid_t refgid, pid_t *gotpid, uid_t *gotuid, gid_t *gotgid); /*! * \brief Create a new object for using Pacemaker daemon IPC * * \param[out] api Where to store new IPC object * \param[in] server Which Pacemaker daemon the object is for * * \return Standard Pacemaker result code * * \note The caller is responsible for freeing *api using pcmk_free_ipc_api(). * \note This is intended to supersede crm_ipc_new() but currently only * supports the controller, pacemakerd, and schedulerd IPC API. */ int pcmk_new_ipc_api(pcmk_ipc_api_t **api, enum pcmk_ipc_server server) { if (api == NULL) { return EINVAL; } *api = calloc(1, sizeof(pcmk_ipc_api_t)); if (*api == NULL) { return errno; } (*api)->server = server; if (pcmk_ipc_name(*api, false) == NULL) { pcmk_free_ipc_api(*api); *api = NULL; return EOPNOTSUPP; } // Set server methods switch (server) { case pcmk_ipc_attrd: (*api)->cmds = pcmk__attrd_api_methods(); break; case pcmk_ipc_based: break; case pcmk_ipc_controld: (*api)->cmds = pcmk__controld_api_methods(); break; case pcmk_ipc_execd: break; case pcmk_ipc_fenced: break; case pcmk_ipc_pacemakerd: (*api)->cmds = pcmk__pacemakerd_api_methods(); break; case pcmk_ipc_schedulerd: (*api)->cmds = pcmk__schedulerd_api_methods(); break; default: // pcmk_ipc_unknown pcmk_free_ipc_api(*api); *api = NULL; return EINVAL; } if ((*api)->cmds == NULL) { pcmk_free_ipc_api(*api); *api = NULL; return ENOMEM; } (*api)->ipc = crm_ipc_new(pcmk_ipc_name(*api, false), 0); if ((*api)->ipc == NULL) { pcmk_free_ipc_api(*api); *api = NULL; return ENOMEM; } // If daemon API has its own data to track, allocate it if ((*api)->cmds->new_data != NULL) { if ((*api)->cmds->new_data(*api) != pcmk_rc_ok) { pcmk_free_ipc_api(*api); *api = NULL; return ENOMEM; } } crm_trace("Created %s API IPC object", pcmk_ipc_name(*api, true)); return pcmk_rc_ok; } static void free_daemon_specific_data(pcmk_ipc_api_t *api) { if ((api != NULL) && (api->cmds != NULL)) { if ((api->cmds->free_data != NULL) && (api->api_data != NULL)) { api->cmds->free_data(api->api_data); api->api_data = NULL; } free(api->cmds); api->cmds = NULL; } } /*! * \internal * \brief Call an IPC API event callback, if one is registed * * \param[in,out] api IPC API connection * \param[in] event_type The type of event that occurred * \param[in] status Event status * \param[in,out] event_data Event-specific data */ void pcmk__call_ipc_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, crm_exit_t status, void *event_data) { if ((api != NULL) && (api->cb != NULL)) { api->cb(api, event_type, status, event_data, api->user_data); } } /*! 
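
Illustrative sketch (not part of this patch) of the caller side of the update path: passing a NULL API object lets connect_and_send_attrd_request() create and tear down a one-shot synchronous connection itself. The header paths are assumptions, and the attribute name and value are invented.

#include <stdint.h>
#include <stdio.h>
#include <crm/common/results.h>              // pcmk_rc_ok, pcmk_rc_str (assumed path)
#include <crm/common/ipc_attrd_internal.h>   // pcmk__attrd_api_update() (assumed path)

static int
set_transient_attr(const char *node, const char *name, const char *value)
{
    /* NULL api: the helper creates a temporary synchronous connection,
     * sends the request, and frees the connection again. */
    uint32_t options = pcmk__node_attr_value | pcmk__node_attr_sync_local;
    int rc = pcmk__attrd_api_update(NULL, node, name, value,
                                    NULL /* dampen */, NULL /* set */,
                                    NULL /* user_name */, options);

    if (rc != pcmk_rc_ok) {
        fprintf(stderr, "update of %s failed: %s\n", name, pcmk_rc_str(rc));
    }
    return rc;
}

The pcmk__node_attr_sync_local flag requests a local sync point, so the attribute manager delays its ACK until the value has been applied locally, matching the sync-point handling described earlier in this patch.
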
* \internal * \brief Clean up after an IPC disconnect * * \param[in,out] user_data IPC API connection that disconnected * * \note This function can be used as a main loop IPC destroy callback. */ static void ipc_post_disconnect(gpointer user_data) { pcmk_ipc_api_t *api = user_data; crm_info("Disconnected from %s", pcmk_ipc_name(api, true)); // Perform any daemon-specific handling needed if ((api->cmds != NULL) && (api->cmds->post_disconnect != NULL)) { api->cmds->post_disconnect(api); } // Call client's registered event callback pcmk__call_ipc_callback(api, pcmk_ipc_event_disconnect, CRM_EX_DISCONNECT, NULL); /* If this is being called from a running main loop, mainloop_gio_destroy() * will free ipc and mainloop_io immediately after calling this function. * If this is called from a stopped main loop, these will leak, so the best * practice is to close the connection before stopping the main loop. */ api->ipc = NULL; api->mainloop_io = NULL; if (api->free_on_disconnect) { /* pcmk_free_ipc_api() has already been called, but did not free api * or api->cmds because this function needed them. Do that now. */ free_daemon_specific_data(api); crm_trace("Freeing IPC API object after disconnect"); free(api); } } /*! * \brief Free the contents of an IPC API object * * \param[in,out] api IPC API object to free */ void pcmk_free_ipc_api(pcmk_ipc_api_t *api) { bool free_on_disconnect = false; if (api == NULL) { return; } crm_debug("Releasing %s IPC API", pcmk_ipc_name(api, true)); if (api->ipc != NULL) { if (api->mainloop_io != NULL) { /* We need to keep the api pointer itself around, because it is the * user data for the IPC client destroy callback. That will be * triggered by the pcmk_disconnect_ipc() call below, but it might * happen later in the main loop (if still running). * * This flag tells the destroy callback to free the object. It can't * do that unconditionally, because the application might call this * function after a disconnect that happened by other means. */ free_on_disconnect = api->free_on_disconnect = true; } pcmk_disconnect_ipc(api); // Frees api if free_on_disconnect is true } if (!free_on_disconnect) { free_daemon_specific_data(api); crm_trace("Freeing IPC API object"); free(api); } } /*! * \brief Get the IPC name used with an IPC API connection * * \param[in] api IPC API connection * \param[in] for_log If true, return human-friendly name instead of IPC name * * \return IPC API's human-friendly or connection name, or if none is available, * "Pacemaker" if for_log is true and NULL if for_log is false */ const char * pcmk_ipc_name(const pcmk_ipc_api_t *api, bool for_log) { if (api == NULL) { return for_log? "Pacemaker" : NULL; } if (for_log) { const char *name = pcmk__server_log_name(api->server); return pcmk__s(name, "Pacemaker"); } switch (api->server) { // These servers do not have pcmk_ipc_api_t implementations yet case pcmk_ipc_based: case pcmk_ipc_execd: case pcmk_ipc_fenced: return NULL; default: return pcmk__server_ipc_name(api->server); } } /*! * \brief Check whether an IPC API connection is active * * \param[in,out] api IPC API connection * * \return true if IPC is connected, false otherwise */ bool pcmk_ipc_is_connected(pcmk_ipc_api_t *api) { return (api != NULL) && crm_ipc_connected(api->ipc); } /*! * \internal * \brief Call the daemon-specific API's dispatch function * * Perform daemon-specific handling of IPC reply dispatch. It is the daemon * method's responsibility to call the client's registered event callback, as * well as allocate and free any event data. 
* * \param[in,out] api IPC API connection * \param[in,out] message IPC reply XML to dispatch */ static bool call_api_dispatch(pcmk_ipc_api_t *api, xmlNode *message) { crm_log_xml_trace(message, "ipc-received"); if ((api->cmds != NULL) && (api->cmds->dispatch != NULL)) { return api->cmds->dispatch(api, message); } return false; } /*! * \internal * \brief Dispatch previously read IPC data * * \param[in] buffer Data read from IPC * \param[in,out] api IPC object * * \return Standard Pacemaker return code. In particular: * * pcmk_rc_ok: There are no more messages expected from the server. Quit * reading. * EINPROGRESS: There are more messages expected from the server. Keep reading. * * All other values indicate an error. */ static int dispatch_ipc_data(const char *buffer, pcmk_ipc_api_t *api) { bool more = false; xmlNode *msg; if (buffer == NULL) { crm_warn("Empty message received from %s IPC", pcmk_ipc_name(api, true)); return ENOMSG; } msg = pcmk__xml_parse(buffer); if (msg == NULL) { crm_warn("Malformed message received from %s IPC", pcmk_ipc_name(api, true)); return EPROTO; } more = call_api_dispatch(api, msg); pcmk__xml_free(msg); if (more) { return EINPROGRESS; } else { return pcmk_rc_ok; } } /*! * \internal * \brief Dispatch data read from IPC source * * \param[in] buffer Data read from IPC * \param[in] length Number of bytes of data in buffer (ignored) * \param[in,out] user_data IPC object * * \return Always 0 (meaning connection is still required) * * \note This function can be used as a main loop IPC dispatch callback. */ static int dispatch_ipc_source_data(const char *buffer, ssize_t length, gpointer user_data) { pcmk_ipc_api_t *api = user_data; CRM_CHECK(api != NULL, return 0); dispatch_ipc_data(buffer, api); return 0; } /*! * \brief Check whether an IPC connection has data available (without main loop) * * \param[in] api IPC API connection * \param[in] timeout_ms If less than 0, poll indefinitely; if 0, poll once * and return immediately; otherwise, poll for up to * this many milliseconds * * \return Standard Pacemaker return code * * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call * this function to check whether IPC data is available. Return values of * interest include pcmk_rc_ok meaning data is available, and EAGAIN * meaning no data is available; all other values indicate errors. * \todo This does not allow the caller to poll multiple file descriptors at * once. If there is demand for that, we could add a wrapper for * pcmk__ipc_fd(api->ipc), so the caller can call poll() themselves. */ int pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms) { int rc; struct pollfd pollfd = { 0, }; if ((api == NULL) || (api->dispatch_type != pcmk_ipc_dispatch_poll)) { return EINVAL; } rc = pcmk__ipc_fd(api->ipc, &(pollfd.fd)); if (rc != pcmk_rc_ok) { crm_debug("Could not obtain file descriptor for %s IPC: %s", pcmk_ipc_name(api, true), pcmk_rc_str(rc)); return rc; } pollfd.events = POLLIN; rc = poll(&pollfd, 1, timeout_ms); if (rc < 0) { /* Some UNIX systems return negative and set EAGAIN for failure to * allocate memory; standardize the return code in that case */ return (errno == EAGAIN)? ENOMEM : errno; } else if (rc == 0) { return EAGAIN; } return pcmk_rc_ok; } /*! * \brief Dispatch available messages on an IPC connection (without main loop) * * \param[in,out] api IPC API connection * * \return Standard Pacemaker return code * * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call * this function when IPC data is available. 
*/ void pcmk_dispatch_ipc(pcmk_ipc_api_t *api) { if (api == NULL) { return; } while (crm_ipc_ready(api->ipc) > 0) { if (crm_ipc_read(api->ipc) > 0) { dispatch_ipc_data(crm_ipc_buffer(api->ipc), api); pcmk__ipc_free_client_buffer(api->ipc); } } } // \return Standard Pacemaker return code static int connect_with_main_loop(pcmk_ipc_api_t *api) { int rc; struct ipc_client_callbacks callbacks = { .dispatch = dispatch_ipc_source_data, .destroy = ipc_post_disconnect, }; rc = pcmk__add_mainloop_ipc(api->ipc, G_PRIORITY_DEFAULT, api, &callbacks, &(api->mainloop_io)); if (rc != pcmk_rc_ok) { return rc; } crm_debug("Connected to %s IPC (attached to main loop)", pcmk_ipc_name(api, true)); /* After this point, api->mainloop_io owns api->ipc, so api->ipc * should not be explicitly freed. */ return pcmk_rc_ok; } // \return Standard Pacemaker return code static int connect_without_main_loop(pcmk_ipc_api_t *api) { int rc = pcmk__connect_generic_ipc(api->ipc); if (rc != pcmk_rc_ok) { crm_ipc_close(api->ipc); } else { crm_debug("Connected to %s IPC (without main loop)", pcmk_ipc_name(api, true)); } return rc; } /*! * \internal * \brief Connect to a Pacemaker daemon via IPC (retrying after soft errors * and ECONNREFUSED) * * \param[in,out] api IPC API instance * \param[in] dispatch_type How IPC replies should be dispatched * \param[in] attempts How many times to try (in case of soft error) * * \return Standard Pacemaker return code */ int pcmk__connect_ipc_retry_conrefused(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type, int attempts) { int remaining = attempts; int rc = pcmk_rc_ok; do { if (rc == ECONNREFUSED) { pcmk__sleep_ms((attempts - remaining) * 500); } rc = pcmk__connect_ipc(api, dispatch_type, remaining); remaining--; } while (rc == ECONNREFUSED && remaining >= 0); return rc; } /*! * \internal * \brief Connect to a Pacemaker daemon via IPC (retrying after soft errors) * * \param[in,out] api IPC API instance * \param[in] dispatch_type How IPC replies should be dispatched * \param[in] attempts How many times to try (in case of soft error) * * \return Standard Pacemaker return code */ int pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type, int attempts) { int rc = pcmk_rc_ok; if ((api == NULL) || (attempts < 1)) { return EINVAL; } if (api->ipc == NULL) { api->ipc = crm_ipc_new(pcmk_ipc_name(api, false), 0); if (api->ipc == NULL) { return ENOMEM; } } if (crm_ipc_connected(api->ipc)) { crm_trace("Already connected to %s", pcmk_ipc_name(api, true)); return pcmk_rc_ok; } api->dispatch_type = dispatch_type; crm_debug("Attempting connection to %s (up to %d time%s)", pcmk_ipc_name(api, true), attempts, pcmk__plural_s(attempts)); for (int remaining = attempts - 1; remaining >= 0; --remaining) { switch (dispatch_type) { case pcmk_ipc_dispatch_main: rc = connect_with_main_loop(api); break; case pcmk_ipc_dispatch_sync: case pcmk_ipc_dispatch_poll: rc = connect_without_main_loop(api); break; } if ((remaining == 0) || ((rc != EAGAIN) && (rc != EALREADY))) { break; // Result is final } // Retry after soft error (interrupted by signal, etc.) pcmk__sleep_ms((attempts - remaining) * 500); crm_debug("Re-attempting connection to %s (%d attempt%s remaining)", pcmk_ipc_name(api, true), remaining, pcmk__plural_s(remaining)); } if (rc != pcmk_rc_ok) { return rc; } if ((api->cmds != NULL) && (api->cmds->post_connect != NULL)) { rc = api->cmds->post_connect(api); if (rc != pcmk_rc_ok) { crm_ipc_close(api->ipc); } } return rc; } /*! 
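
Illustrative sketch (not part of this patch): callers that cannot run a GLib main loop pair pcmk_poll_ipc() with pcmk_dispatch_ipc(), as the notes above describe. The header paths are assumptions and error handling is trimmed.

#include <stdio.h>
#include <crm/common/results.h>   // pcmk_rc_ok, crm_exit_t (assumed path)
#include <crm/common/ipc.h>       // public IPC API (assumed path)

static void
event_cb(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
         crm_exit_t status, void *event_data, void *user_data)
{
    if (event_type == pcmk_ipc_event_reply) {
        printf("reply received (status %d)\n", status);
    }
}

static int
poll_once(void)
{
    pcmk_ipc_api_t *api = NULL;
    int rc = pcmk_new_ipc_api(&api, pcmk_ipc_attrd);

    if (rc != pcmk_rc_ok) {
        return rc;
    }
    pcmk_register_ipc_callback(api, event_cb, NULL);

    rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_poll);
    if (rc == pcmk_rc_ok) {
        /* ... send a request here, then wait up to 5s for data ... */
        if (pcmk_poll_ipc(api, 5000) == pcmk_rc_ok) {
            pcmk_dispatch_ipc(api);   // runs event_cb for each queued reply
        }
        pcmk_disconnect_ipc(api);
    }
    pcmk_free_ipc_api(api);
    return rc;
}
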
* \brief Connect to a Pacemaker daemon via IPC * * \param[in,out] api IPC API instance * \param[in] dispatch_type How IPC replies should be dispatched * * \return Standard Pacemaker return code */ int pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type) { int rc = pcmk__connect_ipc(api, dispatch_type, 2); if (rc != pcmk_rc_ok) { crm_err("Connection to %s failed: %s", pcmk_ipc_name(api, true), pcmk_rc_str(rc)); } return rc; } /*! * \brief Disconnect an IPC API instance * * \param[in,out] api IPC API connection * * \return Standard Pacemaker return code * * \note If the connection is attached to a main loop, this function should be * called before quitting the main loop, to ensure that all memory is * freed. */ void pcmk_disconnect_ipc(pcmk_ipc_api_t *api) { if ((api == NULL) || (api->ipc == NULL)) { return; } switch (api->dispatch_type) { case pcmk_ipc_dispatch_main: { mainloop_io_t *mainloop_io = api->mainloop_io; // Make sure no code with access to api can use these again api->mainloop_io = NULL; api->ipc = NULL; mainloop_del_ipc_client(mainloop_io); // After this point api might have already been freed } break; case pcmk_ipc_dispatch_poll: case pcmk_ipc_dispatch_sync: { crm_ipc_t *ipc = api->ipc; // Make sure no code with access to api can use ipc again api->ipc = NULL; // This should always be the case already, but to be safe api->free_on_disconnect = false; crm_ipc_close(ipc); crm_ipc_destroy(ipc); ipc_post_disconnect(api); } break; } } /*! * \brief Register a callback for IPC API events * * \param[in,out] api IPC API connection * \param[in] callback Callback to register * \param[in] userdata Caller data to pass to callback * * \note This function may be called multiple times to update the callback * and/or user data. The caller remains responsible for freeing * userdata in any case (after the IPC is disconnected, if the * user data is still registered with the IPC). */ void pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb, void *user_data) { if (api == NULL) { return; } api->cb = cb; api->user_data = user_data; } /*! * \internal * \brief Send an XML request across an IPC API connection * * \param[in,out] api IPC API connection * \param[in] request XML request to send * * \return Standard Pacemaker return code * * \note Daemon-specific IPC API functions should call this function to send * requests, because it handles different dispatch types appropriately. */ int pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request) { int rc; xmlNode *reply = NULL; enum crm_ipc_flags flags = crm_ipc_flags_none; if ((api == NULL) || (api->ipc == NULL) || (request == NULL)) { return EINVAL; } crm_log_xml_trace(request, "ipc-sent"); // Synchronous dispatch requires waiting for a reply if ((api->dispatch_type == pcmk_ipc_dispatch_sync) && (api->cmds != NULL) && (api->cmds->reply_expected != NULL) && (api->cmds->reply_expected(api, request))) { flags = crm_ipc_client_response; } /* The 0 here means a default timeout of 5 seconds * * @TODO Maybe add a timeout_ms member to pcmk_ipc_api_t and a * pcmk_set_ipc_timeout() setter for it, then use it here. 
*/ rc = crm_ipc_send(api->ipc, request, flags, 0, &reply); if (rc < 0) { return pcmk_legacy2rc(rc); } else if (rc == 0) { return ENODATA; } // With synchronous dispatch, we dispatch any reply now if (reply != NULL) { bool more = call_api_dispatch(api, reply); pcmk__xml_free(reply); while (more) { rc = crm_ipc_read(api->ipc); if (rc == -EAGAIN) { continue; } else if (rc == -ENOMSG || rc == pcmk_ok) { return pcmk_rc_ok; } else if (rc < 0) { return -rc; } rc = dispatch_ipc_data(crm_ipc_buffer(api->ipc), api); pcmk__ipc_free_client_buffer(api->ipc); if (rc == pcmk_rc_ok) { more = false; } else if (rc == EINPROGRESS) { more = true; } else { continue; } } } return pcmk_rc_ok; } /*! * \internal * \brief Create the XML for an IPC request to purge a node from the peer cache * * \param[in] api IPC API connection * \param[in] node_name If not NULL, name of node to purge * \param[in] nodeid If not 0, node ID of node to purge * * \return Newly allocated IPC request XML * * \note The controller, fencer, and pacemakerd use the same request syntax, but * the attribute manager uses a different one. The CIB manager doesn't * have any syntax for it. The executor and scheduler don't connect to the * cluster layer and thus don't have or need any syntax for it. * * \todo Modify the attribute manager to accept the common syntax (as well * as its current one, for compatibility with older clients). Modify * the CIB manager to accept and honor the common syntax. Modify the * executor and scheduler to accept the syntax (immediately returning * success), just for consistency. Modify this function to use the * common syntax with all daemons if their version supports it. */ static xmlNode * create_purge_node_request(const pcmk_ipc_api_t *api, const char *node_name, uint32_t nodeid) { xmlNode *request = NULL; const char *client = crm_system_name? crm_system_name : "client"; switch (api->server) { case pcmk_ipc_attrd: request = pcmk__xe_create(NULL, __func__); pcmk__xe_set(request, PCMK__XA_T, PCMK__VALUE_ATTRD); pcmk__xe_set(request, PCMK__XA_SRC, crm_system_name); pcmk__xe_set(request, PCMK_XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE); - pcmk__xe_set_bool_attr(request, PCMK__XA_REAP, true); + pcmk__xe_set_bool(request, PCMK__XA_REAP, true); pcmk__xe_set(request, PCMK__XA_ATTR_HOST, node_name); if (nodeid > 0) { pcmk__xe_set_int(request, PCMK__XA_ATTR_HOST_ID, nodeid); } break; case pcmk_ipc_controld: case pcmk_ipc_fenced: case pcmk_ipc_pacemakerd: request = pcmk__new_request(api->server, client, NULL, pcmk_ipc_name(api, false), CRM_OP_RM_NODE_CACHE, NULL); if (nodeid > 0) { pcmk__xe_set_ll(request, PCMK_XA_ID, (long long) nodeid); } pcmk__xe_set(request, PCMK_XA_UNAME, node_name); break; case pcmk_ipc_based: case pcmk_ipc_execd: case pcmk_ipc_schedulerd: break; default: // pcmk_ipc_unknown (shouldn't be possible) return NULL; } return request; } /*! * \brief Ask a Pacemaker daemon to purge a node from its peer cache * * \param[in,out] api IPC API connection * \param[in] node_name If not NULL, name of node to purge * \param[in] nodeid If not 0, node ID of node to purge * * \return Standard Pacemaker return code * * \note At least one of node_name or nodeid must be specified. 
*/ int pcmk_ipc_purge_node(pcmk_ipc_api_t *api, const char *node_name, uint32_t nodeid) { int rc = 0; xmlNode *request = NULL; if (api == NULL) { return EINVAL; } if ((node_name == NULL) && (nodeid == 0)) { return EINVAL; } request = create_purge_node_request(api, node_name, nodeid); if (request == NULL) { return EOPNOTSUPP; } rc = pcmk__send_ipc_request(api, request); pcmk__xml_free(request); crm_debug("%s peer cache purge of node %s[%lu]: rc=%d", pcmk_ipc_name(api, true), node_name, (unsigned long) nodeid, rc); return rc; } /* * Generic IPC API (to eventually be deprecated as public API and made internal) */ struct crm_ipc_s { struct pollfd pfd; int need_reply; GByteArray *buffer; char *server_name; // server IPC name being connected to qb_ipcc_connection_t *ipc; }; /*! * \brief Create a new (legacy) object for using Pacemaker daemon IPC * * \param[in] name IPC system name to connect to * \param[in] max_size Use a maximum IPC buffer size of at least this size * * \return Newly allocated IPC object on success, NULL otherwise * * \note The caller is responsible for freeing the result using * crm_ipc_destroy(). * \note This should be considered deprecated for use with daemons supported by * pcmk_new_ipc_api(). * \note @COMPAT Since 3.0.1, \p max_size is ignored and the default given by * \c crm_ipc_default_buffer_size() will be used instead. */ crm_ipc_t * crm_ipc_new(const char *name, size_t max_size) { crm_ipc_t *client = NULL; client = calloc(1, sizeof(crm_ipc_t)); if (client == NULL) { crm_err("Could not create IPC connection: %s", strerror(errno)); return NULL; } client->server_name = strdup(name); if (client->server_name == NULL) { crm_err("Could not create %s IPC connection: %s", name, strerror(errno)); free(client); return NULL; } client->buffer = NULL; client->pfd.fd = -1; client->pfd.events = POLLIN; client->pfd.revents = 0; return client; } /*! 
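
Illustrative sketch (not part of this patch): caller side of pcmk_ipc_purge_node(). At least one of the node name or node ID must be given, and the API object must already be connected; the header path is an assumption.

#include <errno.h>
#include <stdint.h>
#include <crm/common/ipc.h>   // public IPC API (assumed path)

/* Ask a daemon to drop a node from its peer cache.
 * 'api' must already be connected (see pcmk_connect_ipc()). */
static int
purge_peer(pcmk_ipc_api_t *api, const char *node_name, uint32_t nodeid)
{
    if ((node_name == NULL) && (nodeid == 0)) {
        return EINVAL;        // mirrors the check in pcmk_ipc_purge_node()
    }
    return pcmk_ipc_purge_node(api, node_name, nodeid);
}
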
* \internal * \brief Connect a generic (not daemon-specific) IPC object * * \param[in,out] ipc Generic IPC object to connect * * \return Standard Pacemaker return code */ int pcmk__connect_generic_ipc(crm_ipc_t *ipc) { uid_t cl_uid = 0; gid_t cl_gid = 0; pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; int rc = pcmk_rc_ok; if (ipc == NULL) { return EINVAL; } ipc->need_reply = FALSE; ipc->ipc = qb_ipcc_connect(ipc->server_name, crm_ipc_default_buffer_size()); if (ipc->ipc == NULL) { return errno; } rc = qb_ipcc_fd_get(ipc->ipc, &ipc->pfd.fd); if (rc < 0) { // -errno crm_ipc_close(ipc); return -rc; } rc = pcmk_daemon_user(&cl_uid, &cl_gid); rc = pcmk_legacy2rc(rc); if (rc != pcmk_rc_ok) { crm_ipc_close(ipc); return rc; } rc = is_ipc_provider_expected(ipc->ipc, ipc->pfd.fd, cl_uid, cl_gid, &found_pid, &found_uid, &found_gid); if (rc != pcmk_rc_ok) { if (rc == pcmk_rc_ipc_unauthorized) { crm_info("%s IPC provider authentication failed: process %lld has " "uid %lld (expected %lld) and gid %lld (expected %lld)", ipc->server_name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), (long long) found_uid, (long long) cl_uid, (long long) found_gid, (long long) cl_gid); } crm_ipc_close(ipc); return rc; } return pcmk_rc_ok; } void crm_ipc_close(crm_ipc_t * client) { if (client) { if (client->ipc) { qb_ipcc_connection_t *ipc = client->ipc; client->ipc = NULL; qb_ipcc_disconnect(ipc); } } } void crm_ipc_destroy(crm_ipc_t * client) { if (client) { if (client->ipc && qb_ipcc_is_connected(client->ipc)) { crm_notice("Destroying active %s IPC connection", client->server_name); /* The next line is basically unsafe * * If this connection was attached to mainloop and mainloop is active, * the 'disconnected' callback will end up back here and we'll end * up free'ing the memory twice - something that can still happen * even without this if we destroy a connection and it closes before * we call exit */ /* crm_ipc_close(client); */ } else { crm_trace("Destroying inactive %s IPC connection", client->server_name); } if (client->buffer != NULL) { pcmk__ipc_free_client_buffer(client); } free(client->server_name); free(client); } } /*! * \internal * \brief Get the file descriptor for a generic IPC object * * \param[in,out] ipc Generic IPC object to get file descriptor for * \param[out] fd Where to store file descriptor * * \return Standard Pacemaker return code */ int pcmk__ipc_fd(crm_ipc_t *ipc, int *fd) { if ((ipc == NULL) || (fd == NULL)) { return EINVAL; } if ((ipc->ipc == NULL) || (ipc->pfd.fd < 0)) { return ENOTCONN; } *fd = ipc->pfd.fd; return pcmk_rc_ok; } int crm_ipc_get_fd(crm_ipc_t * client) { int fd = -1; if (pcmk__ipc_fd(client, &fd) != pcmk_rc_ok) { crm_err("Could not obtain file descriptor for %s IPC", ((client == NULL)? "unspecified" : client->server_name)); errno = EINVAL; return -EINVAL; } return fd; } bool crm_ipc_connected(crm_ipc_t * client) { bool rc = FALSE; if (client == NULL) { crm_trace("No client"); return FALSE; } else if (client->ipc == NULL) { crm_trace("No connection"); return FALSE; } else if (client->pfd.fd < 0) { crm_trace("Bad descriptor"); return FALSE; } rc = qb_ipcc_is_connected(client->ipc); if (rc == FALSE) { client->pfd.fd = -EINVAL; } return rc; } /*! 
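
Illustrative sketch (not part of this patch): on Linux, the peer-credential check that pcmk__connect_generic_ipc() performs through is_ipc_provider_expected() ultimately relies on SO_PEERCRED. The standalone C below shows just that step for a connected AF_UNIX socket, with the same acceptance rule (root, or a matching uid or gid).

#define _GNU_SOURCE             // struct ucred on glibc
#include <stdbool.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Return true if the peer of a connected AF_UNIX socket runs as root or as
 * the expected uid/gid pair. */
static bool
peer_is_expected(int sock, uid_t expected_uid, gid_t expected_gid)
{
    struct ucred peer;
    socklen_t len = sizeof(peer);

    if ((getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &peer, &len) < 0)
        || (len != sizeof(peer))) {
        return false;           // cannot verify; treat as unauthorized
    }
    printf("peer pid=%ld uid=%ld gid=%ld\n",
           (long) peer.pid, (long) peer.uid, (long) peer.gid);

    return (peer.uid == 0)      // root is always accepted
           || (peer.uid == expected_uid)
           || (peer.gid == expected_gid);
}
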
* \brief Check whether an IPC connection is ready to be read * * \param[in,out] client Connection to check * * \return Positive value if ready to be read, 0 if not ready, -errno on error */ int crm_ipc_ready(crm_ipc_t *client) { int rc; pcmk__assert(client != NULL); if (!crm_ipc_connected(client)) { return -ENOTCONN; } client->pfd.revents = 0; rc = poll(&(client->pfd), 1, 0); return (rc < 0)? -errno : rc; } long crm_ipc_read(crm_ipc_t *client) { guint8 *buffer = NULL; long rc = -ENOMSG; pcmk__assert((client != NULL) && (client->ipc != NULL)); buffer = g_malloc0(crm_ipc_default_buffer_size()); do { pcmk__ipc_header_t *header = NULL; ssize_t bytes = qb_ipcc_event_recv(client->ipc, buffer, crm_ipc_default_buffer_size(), 0); header = (pcmk__ipc_header_t *)(void *) buffer; if (bytes <= 0) { crm_trace("No message received from %s IPC: %s", client->server_name, strerror(-bytes)); if (!crm_ipc_connected(client) || bytes == -ENOTCONN) { crm_err("Connection to %s IPC failed", client->server_name); rc = -ENOTCONN; pcmk__ipc_free_client_buffer(client); } else if (bytes == -EAGAIN) { rc = -EAGAIN; } goto done; } else if (bytes != header->size + sizeof(pcmk__ipc_header_t)) { crm_err("Message size does not match header"); rc = -EBADMSG; pcmk__ipc_free_client_buffer(client); goto done; } crm_trace("Received %s IPC event %" PRId32 " size=%" PRIu32 " rc=%zu", client->server_name, header->qb.id, header->qb.size, bytes); rc = pcmk__ipc_msg_append(&client->buffer, buffer); if (rc == pcmk_rc_ok) { break; } else if (rc == pcmk_rc_ipc_more) { continue; } else { pcmk__ipc_free_client_buffer(client); rc = pcmk_rc2legacy(rc); goto done; } } while (true); if (client->buffer->len > 0) { /* Data length excluding the header */ rc = client->buffer->len - sizeof(pcmk__ipc_header_t); } done: g_free(buffer); return rc; } void pcmk__ipc_free_client_buffer(crm_ipc_t *client) { pcmk__assert(client != NULL); if (client->buffer != NULL) { g_byte_array_free(client->buffer, TRUE); client->buffer = NULL; } } const char * crm_ipc_buffer(crm_ipc_t * client) { pcmk__assert(client != NULL); CRM_CHECK(client->buffer != NULL, return NULL); return (const char *) (client->buffer->data + sizeof(pcmk__ipc_header_t)); } uint32_t crm_ipc_buffer_flags(crm_ipc_t * client) { pcmk__ipc_header_t *header = NULL; pcmk__assert(client != NULL); if (client->buffer == NULL) { return 0; } header = (pcmk__ipc_header_t *)(void*) client->buffer->data; return header->flags; } const char * crm_ipc_name(crm_ipc_t * client) { pcmk__assert(client != NULL); return client->server_name; } // \return Standard Pacemaker return code static int internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout, ssize_t *bytes, xmlNode **reply) { guint8 *buffer = NULL; pcmk__ipc_header_t *hdr = NULL; time_t timeout = 0; int32_t qb_timeout = -1; int rc = pcmk_rc_ok; int reply_id = 0; if (ms_timeout > 0) { timeout = time(NULL) + 1 + pcmk__timeout_ms2s(ms_timeout); qb_timeout = 1000; } /* get the reply */ crm_trace("Expecting reply to %s IPC message %d", client->server_name, request_id); buffer = g_malloc0(crm_ipc_default_buffer_size()); do { guint8 *data = NULL; xmlNode *xml = NULL; *bytes = qb_ipcc_recv(client->ipc, buffer, crm_ipc_default_buffer_size(), qb_timeout); hdr = (pcmk__ipc_header_t *) (void *) buffer; if (*bytes <= 0) { if (!crm_ipc_connected(client)) { crm_err("%s IPC provider disconnected while waiting for message %d", client->server_name, request_id); break; } continue; } else if (*bytes != hdr->size + sizeof(pcmk__ipc_header_t)) { crm_err("Message 
size does not match header"); *bytes = -EBADMSG; break; } reply_id = hdr->qb.id; if (reply_id == request_id) { /* Got the reply we were expecting. */ rc = pcmk__ipc_msg_append(&client->buffer, buffer); if (rc == pcmk_rc_ok) { break; } else if (rc == pcmk_rc_ipc_more) { continue; } else { goto done; } } data = buffer + sizeof(pcmk__ipc_header_t); xml = pcmk__xml_parse((const char *) data); if (reply_id < request_id) { crm_err("Discarding old reply %d (need %d)", reply_id, request_id); crm_log_xml_notice(xml, "OldIpcReply"); } else if (reply_id > request_id) { crm_err("Discarding newer reply %d (need %d)", reply_id, request_id); crm_log_xml_notice(xml, "ImpossibleReply"); pcmk__assert(hdr->qb.id <= request_id); } } while (time(NULL) < timeout || (timeout == 0 && *bytes == -EAGAIN)); if (*bytes < 0) { rc = (int) -*bytes; // System errno crm_trace("%s reply to %s IPC %d: %s " QB_XS " rc=%d", (client->buffer == NULL) ? "No" : "Incomplete", client->server_name, request_id, pcmk_rc_str(rc), rc); } else if ((client->buffer != NULL) && (client->buffer->len > 0)) { crm_trace("Received %u-byte reply %d to %s IPC %d: %.100s", client->buffer->len, reply_id, client->server_name, request_id, crm_ipc_buffer(client)); if (reply != NULL) { *reply = pcmk__xml_parse(crm_ipc_buffer(client)); } } /* If bytes == 0, we'll return that to crm_ipc_send which will interpret * that as pcmk_rc_ok, log that the IPC request failed (since we did not * give it a valid reply), and return that 0 to its callers. It's up to * the callers to take appropriate action after that. */ /* Once we've parsed the client buffer as XML and saved it to reply, * there's no need to keep the client buffer around anymore. Free it here * to avoid having to do this anywhere crm_ipc_send is called. */ done: pcmk__ipc_free_client_buffer(client); g_free(buffer); return rc; } static int discard_old_replies(crm_ipc_t *client, int32_t ms_timeout) { pcmk__ipc_header_t *header = NULL; int rc = pcmk_rc_ok; ssize_t qb_rc = 0; char *buffer = pcmk__assert_alloc(crm_ipc_default_buffer_size(), sizeof(char)); qb_rc = qb_ipcc_recv(client->ipc, buffer, crm_ipc_default_buffer_size(), ms_timeout); if (qb_rc < 0) { crm_warn("Sending %s IPC disabled until pending reply received", client->server_name); rc = EALREADY; goto done; } header = (pcmk__ipc_header_t *)(void *) buffer; if (!pcmk__valid_ipc_header(header)) { rc = EBADMSG; } else if (!pcmk__is_set(header->flags, crm_ipc_multipart) || pcmk__is_set(header->flags, crm_ipc_multipart_end)) { crm_notice("Sending %s IPC re-enabled after pending reply received", client->server_name); client->need_reply = FALSE; } else { crm_warn("Sending %s IPC disabled until multipart IPC message " "reply received", client->server_name); rc = EALREADY; } done: free(buffer); return rc; } /*! 
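
Illustrative sketch (not part of this patch): a simplified version of the reply-matching policy in internal_ipc_get_reply(), which keeps reading until the reply ID matches the request, discards anything older, and gives up at a deadline derived from the timeout. The receive step is a hypothetical stand-in passed in as a callback.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* 'recv_one' is a hypothetical stand-in for receiving one message and
 * reporting its reply ID; it returns false if nothing has arrived yet. */
static bool
wait_for_reply(int request_id, int timeout_s,
               bool (*recv_one)(int *reply_id))
{
    time_t deadline = time(NULL) + 1 + timeout_s;

    while (time(NULL) < deadline) {
        int reply_id = 0;

        if (!recv_one(&reply_id)) {
            continue;                       // nothing yet; keep waiting
        }
        if (reply_id == request_id) {
            return true;                    // the reply we asked for
        }
        if (reply_id < request_id) {        // stale reply: log and drop it
            fprintf(stderr, "discarding old reply %d (need %d)\n",
                    reply_id, request_id);
        }
    }
    return false;                           // deadline passed
}
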
* \brief Send an IPC XML message * * \param[in,out] client Connection to IPC server * \param[in] message XML message to send * \param[in] flags Bitmask of crm_ipc_flags * \param[in] ms_timeout Give up if not sent within this much time * (5 seconds if 0, or no timeout if negative) * \param[out] reply Reply from server (or NULL if none) * * \return Negative errno on error, otherwise size of reply received in bytes * if reply was needed, otherwise number of bytes sent */ int crm_ipc_send(crm_ipc_t *client, const xmlNode *message, enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode **reply) { int rc = 0; ssize_t bytes = 0; ssize_t sent_bytes = 0; struct iovec *iov = NULL; static uint32_t id = 0; pcmk__ipc_header_t *header; GString *iov_buffer = NULL; uint16_t index = 0; if (client == NULL) { crm_notice("Can't send IPC request without connection (bug?): %.100s", message); return -ENOTCONN; } else if (!crm_ipc_connected(client)) { /* Don't even bother */ crm_notice("Can't send %s IPC requests: Connection closed", client->server_name); return -ENOTCONN; } if (ms_timeout == 0) { ms_timeout = 5000; } /* This block exists only to clear out any old replies that we haven't * yet read. We don't care about their contents since it's too late to * do anything with them, so we just read and throw them away. */ if (client->need_reply) { int discard_rc = discard_old_replies(client, ms_timeout); if (discard_rc != pcmk_rc_ok) { return pcmk_rc2legacy(discard_rc); } } id++; CRM_LOG_ASSERT(id != 0); /* Crude wrap-around detection */ iov_buffer = g_string_sized_new(1024); pcmk__xml_string(message, 0, iov_buffer, 0); do { ssize_t qb_rc = 0; time_t timeout = 0; rc = pcmk__ipc_prepare_iov(id, iov_buffer, index, &iov, &bytes); if ((rc != pcmk_rc_ok) && (rc != pcmk_rc_ipc_more)) { crm_warn("Couldn't prepare %s IPC request: %s " QB_XS " rc=%d", client->server_name, pcmk_rc_str(rc), rc); g_string_free(iov_buffer, TRUE); return pcmk_rc2legacy(rc); } header = iov[0].iov_base; pcmk__set_ipc_flags(header->flags, client->server_name, flags); if (pcmk__is_set(flags, crm_ipc_proxied)) { /* Don't look for a synchronous response */ pcmk__clear_ipc_flags(flags, "client", crm_ipc_client_response); } if (pcmk__is_set(header->flags, crm_ipc_multipart)) { bool is_end = pcmk__is_set(header->flags, crm_ipc_multipart_end); crm_trace("Sending %s IPC request %" PRId32 " (%spart %" PRIu16 ") of " "%" PRId32 " bytes using %dms timeout", client->server_name, header->qb.id, is_end ? "final " : "", index, header->qb.size, ms_timeout); crm_trace("Text = %s", (char *) iov[1].iov_base); } else { crm_trace("Sending %s IPC request %" PRId32 " of %" PRId32 " bytes " "using %dms timeout", client->server_name, header->qb.id, header->qb.size, ms_timeout); crm_trace("Text = %s", (char *) iov[1].iov_base); } /* Send the IPC request, respecting any timeout we were passed */ if (ms_timeout > 0) { timeout = time(NULL) + 1 + pcmk__timeout_ms2s(ms_timeout); } do { qb_rc = qb_ipcc_sendv(client->ipc, iov, 2); } while ((qb_rc == -EAGAIN) && ((timeout == 0) || (time(NULL) < timeout))); /* An error occurred when sending. */ if (qb_rc <= 0) { rc = (int) qb_rc; // Negative of system errno goto send_cleanup; } /* Sending succeeded. The next action depends on whether this was a * multipart IPC message or not. */ if (rc == pcmk_rc_ok) { /* This was either a standalone IPC message or the last part of * a multipart message. Set the return value and break out of * this processing loop. 
*/ sent_bytes += qb_rc; rc = (int) sent_bytes; break; } else { /* There's no way to get here for any value other than rc == pcmk_rc_more * given the check right after pcmk__ipc_prepare_iov. * * This was a multipart message, loop to process the next chunk. */ sent_bytes += qb_rc; index++; } pcmk_free_ipc_event(iov); iov = NULL; } while (true); /* If we should not wait for a response, bail now */ if (!pcmk__is_set(flags, crm_ipc_client_response)) { crm_trace("Not waiting for reply to %s IPC request %d", client->server_name, header->qb.id); goto send_cleanup; } pcmk__ipc_free_client_buffer(client); rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout, &bytes, reply); if (rc == pcmk_rc_ok) { rc = (int) bytes; // Size of reply received } else { /* rc is either a positive system errno or a negative standard Pacemaker * return code. If it's an errno, we need to convert it back to a * negative number for comparison and return at the end of this function. */ rc = pcmk_rc2legacy(rc); if (ms_timeout > 0) { /* We didn't get the reply in time, so disable future sends for now. * The only alternative would be to close the connection since we * don't know how to detect and discard out-of-sequence replies. * * @TODO Implement out-of-sequence detection */ client->need_reply = TRUE; } } send_cleanup: if (!crm_ipc_connected(client)) { crm_notice("Couldn't send %s IPC request %d: Connection closed " QB_XS " rc=%d", client->server_name, header->qb.id, rc); } else if (rc == -ETIMEDOUT) { crm_warn("%s IPC request %d failed: %s after %dms " QB_XS " rc=%d", client->server_name, header->qb.id, pcmk_strerror(rc), ms_timeout, rc); crm_write_blackbox(0, NULL); } else if (rc <= 0) { crm_warn("%s IPC request %d failed: %s " QB_XS " rc=%d", client->server_name, header->qb.id, ((rc == 0)? "No bytes sent" : pcmk_strerror(rc)), rc); } g_string_free(iov_buffer, TRUE); pcmk_free_ipc_event(iov); // coverity[return_overflow] return rc; } /*! * \brief Ensure an IPC provider has expected user or group * * \param[in] qb_ipc libqb client connection if available * \param[in] sock Connected Unix socket for IPC * \param[in] refuid Expected user ID * \param[in] refgid Expected group ID * \param[out] gotpid If not NULL, where to store provider's actual process ID * (or 1 on platforms where ID is not available) * \param[out] gotuid If not NULL, where to store provider's actual user ID * \param[out] gotgid If not NULL, where to store provider's actual group ID * * \return Standard Pacemaker return code * \note An actual user ID of 0 (root) will always be considered authorized, * regardless of the expected values provided. The caller can use the * output arguments to be stricter than this function. 
*/ static int is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock, uid_t refuid, gid_t refgid, pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) { int rc = EOPNOTSUPP; pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; #ifdef HAVE_QB_IPCC_AUTH_GET if (qb_ipc != NULL) { rc = qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid); rc = -rc; // libqb returns 0 or -errno if (rc == pcmk_rc_ok) { goto found; } } #endif #ifdef HAVE_UCRED { struct ucred ucred; socklen_t ucred_len = sizeof(ucred); if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len) < 0) { rc = errno; } else if (ucred_len != sizeof(ucred)) { rc = EOPNOTSUPP; } else { found_pid = ucred.pid; found_uid = ucred.uid; found_gid = ucred.gid; goto found; } } #endif #ifdef HAVE_SOCKPEERCRED { struct sockpeercred sockpeercred; socklen_t sockpeercred_len = sizeof(sockpeercred); if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &sockpeercred, &sockpeercred_len) < 0) { rc = errno; } else if (sockpeercred_len != sizeof(sockpeercred)) { rc = EOPNOTSUPP; } else { found_pid = sockpeercred.pid; found_uid = sockpeercred.uid; found_gid = sockpeercred.gid; goto found; } } #endif #ifdef HAVE_GETPEEREID // For example, FreeBSD if (getpeereid(sock, &found_uid, &found_gid) < 0) { rc = errno; } else { found_pid = PCMK__SPECIAL_PID; goto found; } #endif #ifdef HAVE_GETPEERUCRED { ucred_t *ucred = NULL; if (getpeerucred(sock, &ucred) < 0) { rc = errno; } else { found_pid = ucred_getpid(ucred); found_uid = ucred_geteuid(ucred); found_gid = ucred_getegid(ucred); ucred_free(ucred); goto found; } } #endif return rc; // If we get here, nothing succeeded found: if (gotpid != NULL) { *gotpid = found_pid; } if (gotuid != NULL) { *gotuid = found_uid; } if (gotgid != NULL) { *gotgid = found_gid; } if ((found_uid != 0) && (found_uid != refuid) && (found_gid != refgid)) { return pcmk_rc_ipc_unauthorized; } return pcmk_rc_ok; } int crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid, pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) { int ret = is_ipc_provider_expected(NULL, sock, refuid, refgid, gotpid, gotuid, gotgid); /* The old function had some very odd return codes*/ if (ret == 0) { return 1; } else if (ret == pcmk_rc_ipc_unauthorized) { return 0; } else { return pcmk_rc2legacy(ret); } } int pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, gid_t refgid, pid_t *gotpid) { static char last_asked_name[PATH_MAX / 2] = ""; /* log spam prevention */ int fd; int rc = pcmk_rc_ipc_unresponsive; int auth_rc = 0; int32_t qb_rc; pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; qb_ipcc_connection_t *c; #ifdef HAVE_QB_IPCC_CONNECT_ASYNC struct pollfd pollfd = { 0, }; int poll_rc; c = qb_ipcc_connect_async(name, 0, &(pollfd.fd)); #else c = qb_ipcc_connect(name, 0); #endif if (c == NULL) { crm_info("Could not connect to %s IPC: %s", name, strerror(errno)); rc = pcmk_rc_ipc_unresponsive; goto bail; } #ifdef HAVE_QB_IPCC_CONNECT_ASYNC pollfd.events = POLLIN; do { poll_rc = poll(&pollfd, 1, 5000); } while ((poll_rc == -1) && (errno == EINTR)); /* If poll() failed, given that disconnect function is not registered yet, * qb_ipcc_disconnect() won't clean up the socket. In any case, call * qb_ipcc_connect_continue() here so that it may fail and do the cleanup * for us. 
*/ if (qb_ipcc_connect_continue(c) != 0) { crm_info("Could not connect to %s IPC: %s", name, (poll_rc == 0)?"timeout":strerror(errno)); rc = pcmk_rc_ipc_unresponsive; c = NULL; // qb_ipcc_connect_continue cleaned up for us goto bail; } #endif qb_rc = qb_ipcc_fd_get(c, &fd); if (qb_rc != 0) { rc = (int) -qb_rc; // System errno crm_err("Could not get fd from %s IPC: %s " QB_XS " rc=%d", name, pcmk_rc_str(rc), rc); goto bail; } auth_rc = is_ipc_provider_expected(c, fd, refuid, refgid, &found_pid, &found_uid, &found_gid); if (auth_rc == pcmk_rc_ipc_unauthorized) { crm_err("Daemon (IPC %s) effectively blocked with unauthorized" " process %lld (uid: %lld, gid: %lld)", name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), (long long) found_uid, (long long) found_gid); rc = pcmk_rc_ipc_unauthorized; goto bail; } if (auth_rc != pcmk_rc_ok) { rc = auth_rc; crm_err("Could not get peer credentials from %s IPC: %s " QB_XS " rc=%d", name, pcmk_rc_str(rc), rc); goto bail; } if (gotpid != NULL) { *gotpid = found_pid; } rc = pcmk_rc_ok; if ((found_uid != refuid || found_gid != refgid) && strncmp(last_asked_name, name, sizeof(last_asked_name))) { if ((found_uid == 0) && (refuid != 0)) { crm_warn("Daemon (IPC %s) runs as root, whereas the expected" " credentials are %lld:%lld, hazard of violating" " the least privilege principle", name, (long long) refuid, (long long) refgid); } else { crm_notice("Daemon (IPC %s) runs as %lld:%lld, whereas the" " expected credentials are %lld:%lld, which may" " mean a different set of privileges than expected", name, (long long) found_uid, (long long) found_gid, (long long) refuid, (long long) refgid); } memccpy(last_asked_name, name, '\0', sizeof(last_asked_name)); } bail: if (c != NULL) { qb_ipcc_disconnect(c); } return rc; } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include bool crm_ipc_connect(crm_ipc_t *client) { int rc = pcmk__connect_generic_ipc(client); if (rc == pcmk_rc_ok) { return true; } if ((client != NULL) && (client->ipc == NULL)) { errno = (rc > 0)? rc : ENOTCONN; crm_debug("Could not establish %s IPC connection: %s (%d)", client->server_name, pcmk_rc_str(errno), errno); } else if (rc == pcmk_rc_ipc_unauthorized) { crm_err("%s IPC provider authentication failed", (client == NULL)? "Pacemaker" : client->server_name); errno = ECONNABORTED; } else { crm_err("Could not verify authenticity of %s IPC provider", (client == NULL)? "Pacemaker" : client->server_name); errno = ENOTCONN; } return false; } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/common/tests/xml_element/Makefile.am b/lib/common/tests/xml_element/Makefile.am index f7288704a9..f980902b45 100644 --- a/lib/common/tests/xml_element/Makefile.am +++ b/lib/common/tests/xml_element/Makefile.am @@ -1,30 +1,30 @@ # # Copyright 2024-2025 the Pacemaker project contributors # # The version control history for this file may have further details. # # This source code is licensed under the GNU General Public License version 2 # or later (GPLv2+) WITHOUT ANY WARRANTY. 
# include $(top_srcdir)/mk/common.mk include $(top_srcdir)/mk/tap.mk include $(top_srcdir)/mk/unittest.mk # Add "_test" to the end of all test program names to simplify .gitignore check_PROGRAMS = \ pcmk__xe_attr_is_true_test \ pcmk__xe_copy_attrs_test \ pcmk__xe_first_child_test \ pcmk__xe_foreach_child_test \ pcmk__xe_get_bool_test \ pcmk__xe_get_datetime_test \ pcmk__xe_get_flags_test \ pcmk__xe_get_score_test \ pcmk__xe_next_test \ - pcmk__xe_set_bool_attr_test \ + pcmk__xe_set_bool_test \ pcmk__xe_set_id_test \ pcmk__xe_set_score_test \ pcmk__xe_sort_attrs_test TESTS = $(check_PROGRAMS) diff --git a/lib/common/tests/xml_element/pcmk__xe_set_bool_attr_test.c b/lib/common/tests/xml_element/pcmk__xe_set_bool_test.c similarity index 89% rename from lib/common/tests/xml_element/pcmk__xe_set_bool_attr_test.c rename to lib/common/tests/xml_element/pcmk__xe_set_bool_test.c index 8cd5a72f61..c40eef5198 100644 --- a/lib/common/tests/xml_element/pcmk__xe_set_bool_attr_test.c +++ b/lib/common/tests/xml_element/pcmk__xe_set_bool_test.c @@ -1,31 +1,31 @@ /* * Copyright 2021-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include static void set_attr(void **state) { xmlNode *node = pcmk__xml_parse(""); - pcmk__xe_set_bool_attr(node, "a", true); - pcmk__xe_set_bool_attr(node, "b", false); + pcmk__xe_set_bool(node, "a", true); + pcmk__xe_set_bool(node, "b", false); assert_string_equal(pcmk__xe_get(node, "a"), PCMK_VALUE_TRUE); assert_string_equal(pcmk__xe_get(node, "b"), PCMK_VALUE_FALSE); pcmk__xml_free(node); } PCMK__UNIT_TEST(pcmk__xml_test_setup_group, pcmk__xml_test_teardown_group, cmocka_unit_test(set_attr)) diff --git a/lib/common/xml_element.c b/lib/common/xml_element.c index 3c912fe508..0242369c2e 100644 --- a/lib/common/xml_element.c +++ b/lib/common/xml_element.c @@ -1,1847 +1,1851 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. */ #include #include // va_start(), etc. #include // uint32_t #include // NULL, etc. #include // free(), etc. #include // strchr(), etc. #include // time_t, etc. #include // xmlNode, etc. #include // xmlValidateNameValue() #include // xmlChar #include #include // pcmk_rc_ok, etc. #include #include "crmcommon_private.h" /*! * \internal * \brief Find first XML child element matching given criteria * * \param[in] parent XML element to search (can be \c NULL) * \param[in] node_name If not \c NULL, only match children of this type * \param[in] attr_n If not \c NULL, only match children with an attribute * of this name. 
* \param[in] attr_v If \p attr_n and this are not NULL, only match children * with an attribute named \p attr_n and this value * * \return Matching XML child element, or \c NULL if none found */ xmlNode * pcmk__xe_first_child(const xmlNode *parent, const char *node_name, const char *attr_n, const char *attr_v) { xmlNode *child = NULL; CRM_CHECK((attr_v == NULL) || (attr_n != NULL), return NULL); if (parent == NULL) { return NULL; } child = parent->children; while ((child != NULL) && (child->type != XML_ELEMENT_NODE)) { child = child->next; } for (; child != NULL; child = pcmk__xe_next(child, NULL)) { const char *value = NULL; if ((node_name != NULL) && !pcmk__xe_is(child, node_name)) { // Node name mismatch continue; } if (attr_n == NULL) { // No attribute match needed return child; } value = pcmk__xe_get(child, attr_n); if ((attr_v == NULL) && (value != NULL)) { // attr_v == NULL: Attribute attr_n must be set (to any value) return child; } if ((attr_v != NULL) && (pcmk__str_eq(value, attr_v, pcmk__str_none))) { // attr_v != NULL: Attribute attr_n must be set to value attr_v return child; } } if (attr_n == NULL) { crm_trace("%s XML has no child element of %s type", (const char *) parent->name, pcmk__s(node_name, "any")); } else { crm_trace("%s XML has no child element of %s type with %s='%s'", (const char *) parent->name, pcmk__s(node_name, "any"), attr_n, attr_v); } return NULL; } /*! * \internal * \brief Return next sibling element of an XML element * * \param[in] xml XML element to check * \param[in] element_name If not NULL, get next sibling with this element name * * \return Next desired sibling of \p xml (or NULL if none) */ xmlNode * pcmk__xe_next(const xmlNode *xml, const char *element_name) { for (xmlNode *next = (xml == NULL)? NULL : xml->next; next != NULL; next = next->next) { if ((next->type == XML_ELEMENT_NODE) && ((element_name == NULL) || pcmk__xe_is(next, element_name))) { return next; } } return NULL; } /*! * \internal * \brief Parse an integer score from an XML attribute * * \param[in] xml XML element with attribute to parse * \param[in] name Name of attribute to parse * \param[out] score Where to store parsed score (can be NULL to * just validate) * \param[in] default_score What to return if the attribute value is not * present or invalid * * \return Standard Pacemaker return code */ int pcmk__xe_get_score(const xmlNode *xml, const char *name, int *score, int default_score) { const char *value = NULL; CRM_CHECK((xml != NULL) && (name != NULL), return EINVAL); value = pcmk__xe_get(xml, name); return pcmk_parse_score(value, score, default_score); } /*! * \internal * \brief Set an XML attribute, expanding \c ++ and \c += where appropriate * * If \p target already has an attribute named \p name set to an integer value * and \p value is an addition assignment expression on \p name, then expand * \p value to an integer and set attribute \p name to the expanded value in * \p target. * * Otherwise, set attribute \p name on \p target using the literal \p value. * * The original attribute value in \p target and the number in an assignment * expression in \p value are parsed and added as scores (that is, their values * are capped at \c INFINITY and \c -INFINITY). For more details, refer to * \c pcmk_parse_score(). * * For example, suppose \p target has an attribute named \c "X" with value * \c "5", and that \p name is \c "X". * * If \p value is \c "X++", the new value of \c "X" in \p target is \c "6". * * If \p value is \c "X+=3", the new value of \c "X" in \p target is \c "8". 
* * If \p value is \c "val", the new value of \c "X" in \p target is \c "val". * * If \p value is \c "Y++", the new value of \c "X" in \p target is \c "Y++". * * \param[in,out] target XML node whose attribute to set * \param[in] name Name of the attribute to set * \param[in] value New value of attribute to set (if NULL, initial value * will be left unchanged) * * \return Standard Pacemaker return code (specifically, \c EINVAL on invalid * argument, or \c pcmk_rc_ok otherwise) */ int pcmk__xe_set_score(xmlNode *target, const char *name, const char *value) { const char *old_value = NULL; CRM_CHECK((target != NULL) && (name != NULL), return EINVAL); if (value == NULL) { // @TODO Maybe instead delete the attribute or set it to 0 return pcmk_rc_ok; } old_value = pcmk__xe_get(target, name); // If no previous value, skip to default case and set the value unexpanded. if (old_value != NULL) { const char *n = name; const char *v = value; // Stop at first character that differs between name and value for (; (*n == *v) && (*n != '\0'); n++, v++); // If value begins with name followed by a "++" or "+=" if ((*n == '\0') && (*v++ == '+') && ((*v == '+') || (*v == '='))) { int add = 1; int old_value_i = 0; int rc = pcmk_rc_ok; // If we're expanding ourselves, no previous value was set; use 0 if (old_value != value) { rc = pcmk_parse_score(old_value, &old_value_i, 0); if (rc != pcmk_rc_ok) { // @TODO This is inconsistent with old_value==NULL crm_trace("Using 0 before incrementing %s because '%s' " "is not a score", name, old_value); } } /* value="X++": new value of X is old_value + 1 * value="X+=Y": new value of X is old_value + Y (for some number Y) */ if (*v != '+') { rc = pcmk_parse_score(++v, &add, 0); if (rc != pcmk_rc_ok) { // @TODO We should probably skip expansion instead crm_trace("Not incrementing %s because '%s' does not have " "a valid increment", name, value); } } pcmk__xe_set_int(target, name, pcmk__add_scores(old_value_i, add)); return pcmk_rc_ok; } } // Default case: set the attribute unexpanded (with value treated literally) if (old_value != value) { pcmk__xe_set(target, name, value); } return pcmk_rc_ok; } /*! * \internal * \brief Copy XML attributes from a source element to a target element * * This is similar to \c xmlCopyPropList() except that attributes are marked * as dirty for change tracking purposes. * * \param[in,out] target XML element to receive copied attributes from \p src * \param[in] src XML element whose attributes to copy to \p target * \param[in] flags Group of enum pcmk__xa_flags * * \return Standard Pacemaker return code */ int pcmk__xe_copy_attrs(xmlNode *target, const xmlNode *src, uint32_t flags) { CRM_CHECK((src != NULL) && (target != NULL), return EINVAL); for (xmlAttr *attr = pcmk__xe_first_attr(src); attr != NULL; attr = attr->next) { const char *name = (const char *) attr->name; const char *value = pcmk__xml_attr_value(attr); if (pcmk__is_set(flags, pcmk__xaf_no_overwrite) && (pcmk__xe_get(target, name) != NULL)) { continue; } if (pcmk__is_set(flags, pcmk__xaf_score_update)) { pcmk__xe_set_score(target, name, value); } else { pcmk__xe_set(target, name, value); } } return pcmk_rc_ok; } /*! 
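/* Illustrative sketch (not part of this patch) of the score expansion
 * described above. The attribute names and values are hypothetical.
 */
static void
score_expansion_example(xmlNode *node)
{
    pcmk__xe_set(node, "X", "5");
    pcmk__xe_set_score(node, "X", "X++");   // "X" is now "6"
    pcmk__xe_set_score(node, "X", "X+=3");  // "X" is now "9"
    pcmk__xe_set_score(node, "X", "Y++");   // Name mismatch: literal "Y++"
}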
* \internal * \brief Compare two XML attributes by name * * \param[in] a First XML attribute to compare * \param[in] b Second XML attribute to compare * * \retval negative \c a->name is \c NULL or comes before \c b->name * lexicographically * \retval 0 \c a->name and \c b->name are equal * \retval positive \c b->name is \c NULL or comes before \c a->name * lexicographically */ static gint compare_xml_attr(gconstpointer a, gconstpointer b) { const xmlAttr *attr_a = a; const xmlAttr *attr_b = b; return pcmk__strcmp((const char *) attr_a->name, (const char *) attr_b->name, pcmk__str_none); } /*! * \internal * \brief Sort an XML element's attributes by name * * This does not consider ACLs and does not mark the attributes as deleted or * dirty. Upon return, all attributes still exist and are set to the same values * as before the call. The only thing that may change is the order of the * attribute list. * * \param[in,out] xml XML element whose attributes to sort */ void pcmk__xe_sort_attrs(xmlNode *xml) { GSList *attr_list = NULL; for (xmlAttr *iter = pcmk__xe_first_attr(xml); iter != NULL; iter = iter->next) { attr_list = g_slist_prepend(attr_list, iter); } attr_list = g_slist_sort(attr_list, compare_xml_attr); for (GSList *iter = attr_list; iter != NULL; iter = iter->next) { xmlNode *attr = iter->data; xmlUnlinkNode(attr); xmlAddChild(xml, attr); } g_slist_free(attr_list); } /*! * \internal * \brief Remove a named attribute from an XML element * * \param[in,out] element XML element to remove an attribute from * \param[in] name Name of attribute to remove */ void pcmk__xe_remove_attr(xmlNode *element, const char *name) { if (name != NULL) { pcmk__xa_remove(xmlHasProp(element, (const xmlChar *) name), false); } } /*! * \internal * \brief Remove a named attribute from an XML element * * This is a wrapper for \c pcmk__xe_remove_attr() for use with * \c pcmk__xml_tree_foreach(). * * \param[in,out] xml XML element to remove an attribute from * \param[in] user_data Name of attribute to remove * * \return \c true (to continue traversing the tree) * * \note This is compatible with \c pcmk__xml_tree_foreach(). */ bool pcmk__xe_remove_attr_cb(xmlNode *xml, void *user_data) { const char *name = user_data; pcmk__xe_remove_attr(xml, name); return true; } /*! * \internal * \brief Remove an XML element's attributes that match some criteria * * \param[in,out] element XML element to modify * \param[in] force If \c true, remove matching attributes immediately, * ignoring ACLs and change tracking * \param[in] match If not NULL, only remove attributes for which * this function returns true * \param[in,out] user_data Data to pass to \p match */ void pcmk__xe_remove_matching_attrs(xmlNode *element, bool force, bool (*match)(xmlAttrPtr, void *), void *user_data) { xmlAttrPtr next = NULL; for (xmlAttrPtr a = pcmk__xe_first_attr(element); a != NULL; a = next) { next = a->next; // Grab now because attribute might get removed if ((match == NULL) || match(a, user_data)) { if (pcmk__xa_remove(a, force) != pcmk_rc_ok) { return; } } } } /*! 
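/* Illustrative sketch (not part of this patch): removing every attribute whose
 * name starts with a given prefix via the matching helper above. The prefix is
 * hypothetical and is passed through user_data; g_str_has_prefix() comes from
 * GLib, which this file already uses.
 */
static bool
attr_has_prefix(xmlAttrPtr attr, void *user_data)
{
    const char *prefix = user_data;

    return g_str_has_prefix((const char *) attr->name, prefix);
}

static void
strip_private_attrs(xmlNode *element)
{
    pcmk__xe_remove_matching_attrs(element, false, attr_has_prefix, "private-");
}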
* \internal * \brief Create a new XML element under a given parent * * \param[in,out] parent XML element that will be the new element's parent * (\c NULL to create a new XML document with the new * node as root) * \param[in] name Name of new element * * \return Newly created XML element (guaranteed not to be \c NULL) */ xmlNode * pcmk__xe_create(xmlNode *parent, const char *name) { xmlNode *node = NULL; pcmk__assert(!pcmk__str_empty(name)); if (parent == NULL) { xmlDoc *doc = pcmk__xml_new_doc(); node = xmlNewDocRawNode(doc, NULL, (const xmlChar *) name, NULL); pcmk__mem_assert(node); xmlDocSetRootElement(doc, node); } else { node = xmlNewChild(parent, NULL, (const xmlChar *) name, NULL); pcmk__mem_assert(node); } pcmk__xml_new_private_data(node); return node; } /*! * \internal * \brief Set a formatted string as an XML node's content * * \param[in,out] node Node whose content to set * \param[in] format printf(3)-style format string * \param[in] ... Arguments for \p format * * \note This function escapes special characters. \c xmlNodeSetContent() does * not. */ G_GNUC_PRINTF(2, 3) void pcmk__xe_set_content(xmlNode *node, const char *format, ...) { if (node != NULL) { const char *content = NULL; char *buf = NULL; /* xmlNodeSetContent() frees node->children and replaces it with new * text. If this function is called for a node that already has a non- * text child, it's a bug. */ CRM_CHECK((node->children == NULL) || (node->children->type == XML_TEXT_NODE), return); if (strchr(format, '%') == NULL) { // Nothing to format content = format; } else { va_list ap; va_start(ap, format); if (pcmk__str_eq(format, "%s", pcmk__str_none)) { // No need to make a copy content = va_arg(ap, const char *); } else { pcmk__assert(vasprintf(&buf, format, ap) >= 0); content = buf; } va_end(ap); } xmlNodeSetContent(node, (const xmlChar *) content); free(buf); } } /*! * \internal * \brief Set a formatted string as an XML element's ID * * If the formatted string would not be a valid ID, it's first sanitized by * \c pcmk__xml_sanitize_id(). * * \param[in,out] node Node whose ID to set * \param[in] format printf(3)-style format string * \param[in] ... Arguments for \p format */ G_GNUC_PRINTF(2, 3) void pcmk__xe_set_id(xmlNode *node, const char *format, ...) { char *id = NULL; va_list ap; pcmk__assert(!pcmk__str_empty(format)); if (node == NULL) { return; } va_start(ap, format); pcmk__assert(vasprintf(&id, format, ap) >= 0); va_end(ap); if (!xmlValidateNameValue((const xmlChar *) id)) { pcmk__xml_sanitize_id(id); } pcmk__xe_set(node, PCMK_XA_ID, id); free(id); } /*! * \internal * \brief Add a "last written" attribute to an XML element, set to current time * * \param[in,out] xe XML element to add attribute to * * \return Value that was set, or NULL on error */ const char * pcmk__xe_add_last_written(xmlNode *xe) { char *now_s = pcmk__epoch2str(NULL, 0); pcmk__xe_set(xe, PCMK_XA_CIB_LAST_WRITTEN, pcmk__s(now_s, "Could not determine current time")); free(now_s); return pcmk__xe_get(xe, PCMK_XA_CIB_LAST_WRITTEN); } /*! * \internal * \brief Merge one XML tree into another * * Here, "merge" means: * 1. Copy attribute values from \p update to the target, overwriting in case of * conflict. * 2. Descend through \p update and the target in parallel. At each level, for * each child of \p update, look for a matching child of the target. * a. For each child, if a match is found, go to step 1, recursively merging * the child of \p update into the child of the target. * b. 
Otherwise, copy the child of \p update as a child of the target. * * A match is defined as the first child of the same type within the target, * with: * * the \c PCMK_XA_ID attribute matching, if set in \p update; otherwise, * * the \c PCMK_XA_ID_REF attribute matching, if set in \p update * * This function does not delete any elements or attributes from the target. It * may add elements or overwrite attributes, as described above. * * \param[in,out] parent If \p target is NULL and this is not, add or update * child of this XML node that matches \p update * \param[in,out] target If not NULL, update this XML * \param[in] update Make the desired XML match this (must not be \c NULL) * \param[in] flags Group of enum pcmk__xa_flags * * \note At least one of \p parent and \p target must be non-NULL. * \note This function is recursive. For the top-level call, \p parent is * \c NULL and \p target is not \c NULL. For recursive calls, \p target is * \c NULL and \p parent is not \c NULL. */ static void update_xe(xmlNode *parent, xmlNode *target, xmlNode *update, uint32_t flags) { // @TODO Try to refactor further, possibly using pcmk__xml_tree_foreach() const char *update_name = NULL; const char *update_id_attr = NULL; const char *update_id_val = NULL; char *trace_s = NULL; crm_log_xml_trace(update, "update"); crm_log_xml_trace(target, "target"); CRM_CHECK(update != NULL, goto done); if (update->type == XML_COMMENT_NODE) { pcmk__xc_update(parent, target, update); goto done; } update_name = (const char *) update->name; CRM_CHECK(update_name != NULL, goto done); CRM_CHECK((target != NULL) || (parent != NULL), goto done); update_id_val = pcmk__xe_id(update); if (update_id_val != NULL) { update_id_attr = PCMK_XA_ID; } else { update_id_val = pcmk__xe_get(update, PCMK_XA_ID_REF); if (update_id_val != NULL) { update_id_attr = PCMK_XA_ID_REF; } } pcmk__if_tracing( { if (update_id_attr != NULL) { trace_s = pcmk__assert_asprintf("<%s %s=%s/>", update_name, update_id_attr, update_id_val); } else { trace_s = pcmk__assert_asprintf("<%s/>", update_name); } }, {} ); if (target == NULL) { // Recursive call target = pcmk__xe_first_child(parent, update_name, update_id_attr, update_id_val); } if (target == NULL) { // Recursive call with no existing matching child target = pcmk__xe_create(parent, update_name); crm_trace("Added %s", pcmk__s(trace_s, update_name)); } else { // Either recursive call with match, or top-level call crm_trace("Found node %s to update", pcmk__s(trace_s, update_name)); } CRM_CHECK(pcmk__xe_is(target, (const char *) update->name), return); pcmk__xe_copy_attrs(target, update, flags); for (xmlNode *child = pcmk__xml_first_child(update); child != NULL; child = pcmk__xml_next(child)) { crm_trace("Updating child of %s", pcmk__s(trace_s, update_name)); update_xe(target, NULL, child, flags); } crm_trace("Finished with %s", pcmk__s(trace_s, update_name)); done: free(trace_s); } /*! * \internal * \brief Delete an XML subtree if it matches a search element * * A match is defined as follows: * * \p xml and \p user_data are both element nodes of the same type. * * If \p user_data has attributes set, \p xml has those attributes set to the * same values. (\p xml may have additional attributes set to arbitrary * values.) * * \param[in,out] xml XML subtree to delete upon match * \param[in] user_data Search element * * \return \c true to continue traversing the tree, or \c false to stop (because * \p xml was deleted) * * \note This is compatible with \c pcmk__xml_tree_foreach(). 
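/* Illustrative sketch (not part of this patch): building a small element with
 * the creation and setter helpers above. The element name, ID format, and
 * attributes are hypothetical.
 */
static xmlNode *
make_status_element(xmlNode *parent, const char *node_name, int generation)
{
    xmlNode *status = pcmk__xe_create(parent, "node_status");

    pcmk__xe_set_id(status, "status-%s", node_name);
    pcmk__xe_set_int(status, "generation", generation);
    pcmk__xe_set_content(status, "Status for %s", node_name);
    return status;
}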
*/ static bool delete_xe_if_matching(xmlNode *xml, void *user_data) { xmlNode *search = user_data; if (!pcmk__xe_is(search, (const char *) xml->name)) { // No match: either not both elements, or different element types return true; } for (const xmlAttr *attr = pcmk__xe_first_attr(search); attr != NULL; attr = attr->next) { const char *search_val = pcmk__xml_attr_value(attr); const char *xml_val = pcmk__xe_get(xml, (const char *) attr->name); if (!pcmk__str_eq(search_val, xml_val, pcmk__str_casei)) { // No match: an attr in xml doesn't match the attr in search return true; } } crm_log_xml_trace(xml, "delete-match"); crm_log_xml_trace(search, "delete-search"); pcmk__xml_free(xml); // Found a match and deleted it; stop traversing tree return false; } /*! * \internal * \brief Search an XML tree depth-first and delete the first matching element * * This function does not attempt to match the tree root (\p xml). * * A match with a node \c node is defined as follows: * * \c node and \p search are both element nodes of the same type. * * If \p search has attributes set, \c node has those attributes set to the * same values. (\c node may have additional attributes set to arbitrary * values.) * * \param[in,out] xml XML subtree to search * \param[in] search Element to match against * * \return Standard Pacemaker return code (specifically, \c pcmk_rc_ok on * successful deletion and an error code otherwise) */ int pcmk__xe_delete_match(xmlNode *xml, xmlNode *search) { // See @COMPAT comment in pcmk__xe_replace_match() CRM_CHECK((xml != NULL) && (search != NULL), return EINVAL); for (xml = pcmk__xe_first_child(xml, NULL, NULL, NULL); xml != NULL; xml = pcmk__xe_next(xml, NULL)) { if (!pcmk__xml_tree_foreach(xml, delete_xe_if_matching, search)) { // Found and deleted an element return pcmk_rc_ok; } } // No match found in this subtree return ENXIO; } /*! * \internal * \brief Replace one XML node with a copy of another XML node * * This function handles change tracking and applies ACLs. * * \param[in,out] old XML node to replace * \param[in] new XML node to copy as replacement for \p old * * \note This frees \p old. */ static void replace_node(xmlNode *old, xmlNode *new) { // Pass old for its doc; it won't remain the parent of new new = pcmk__xml_copy(old, new); old = xmlReplaceNode(old, new); // old == NULL means memory allocation error pcmk__assert(old != NULL); // May be unnecessary but avoids slight changes to some test outputs pcmk__xml_tree_foreach(new, pcmk__xml_reset_node_flags, NULL); if (pcmk__xml_doc_all_flags_set(new->doc, pcmk__xf_tracking)) { // Replaced sections may have included relevant ACLs pcmk__apply_acl(new); } pcmk__xml_mark_changes(old, new); pcmk__xml_free_node(old); } /*! * \internal * \brief Replace one XML subtree with a copy of another if the two match * * A match is defined as follows: * * \p xml and \p user_data are both element nodes of the same type. * * If \p user_data has the \c PCMK_XA_ID attribute set, then \p xml has * \c PCMK_XA_ID set to the same value. * * \param[in,out] xml XML subtree to replace with \p user_data upon match * \param[in] user_data XML to replace \p xml with a copy of upon match * * \return \c true to continue traversing the tree, or \c false to stop (because * \p xml was replaced by \p user_data) * * \note This is compatible with \c pcmk__xml_tree_foreach(). 
*/ static bool replace_xe_if_matching(xmlNode *xml, void *user_data) { xmlNode *replace = user_data; const char *xml_id = NULL; const char *replace_id = NULL; xml_id = pcmk__xe_id(xml); replace_id = pcmk__xe_id(replace); if (!pcmk__xe_is(replace, (const char *) xml->name)) { // No match: either not both elements, or different element types return true; } if ((replace_id != NULL) && !pcmk__str_eq(replace_id, xml_id, pcmk__str_none)) { // No match: ID was provided in replace and doesn't match xml's ID return true; } crm_log_xml_trace(xml, "replace-match"); crm_log_xml_trace(replace, "replace-with"); replace_node(xml, replace); // Found a match and replaced it; stop traversing tree return false; } /*! * \internal * \brief Search an XML tree depth-first and replace the first matching element * * This function does not attempt to match the tree root (\p xml). * * A match with a node \c node is defined as follows: * * \c node and \p replace are both element nodes of the same type. * * If \p replace has the \c PCMK_XA_ID attribute set, then \c node has * \c PCMK_XA_ID set to the same value. * * \param[in,out] xml XML tree to search * \param[in] replace XML to replace a matching element with a copy of * * \return Standard Pacemaker return code (specifically, \c pcmk_rc_ok on * successful replacement and an error code otherwise) */ int pcmk__xe_replace_match(xmlNode *xml, xmlNode *replace) { /* @COMPAT Some of this behavior (like not matching the tree root, which is * allowed by pcmk__xe_update_match()) is questionable for general use but * required for backward compatibility by cib_process_replace() and * cib_process_delete(). Behavior can change at a major version release if * desired. */ CRM_CHECK((xml != NULL) && (replace != NULL), return EINVAL); for (xml = pcmk__xe_first_child(xml, NULL, NULL, NULL); xml != NULL; xml = pcmk__xe_next(xml, NULL)) { if (!pcmk__xml_tree_foreach(xml, replace_xe_if_matching, replace)) { // Found and replaced an element return pcmk_rc_ok; } } // No match found in this subtree return ENXIO; } //! User data for \c update_xe_if_matching() struct update_data { xmlNode *update; //!< Update source uint32_t flags; //!< Group of enum pcmk__xa_flags }; /*! * \internal * \brief Update one XML subtree with another if the two match * * "Update" means to merge a source subtree into a target subtree (see * \c update_xe()). * * A match is defined as follows: * * \p xml and \p user_data->update are both element nodes of the same type. * * \p xml and \p user_data->update have the same \c PCMK_XA_ID attribute * value, or \c PCMK_XA_ID is unset in both * * \param[in,out] xml XML subtree to update with \p user_data->update * upon match * \param[in] user_data struct update_data object * * \return \c true to continue traversing the tree, or \c false to stop (because * \p xml was updated by \p user_data->update) * * \note This is compatible with \c pcmk__xml_tree_foreach(). */ static bool update_xe_if_matching(xmlNode *xml, void *user_data) { struct update_data *data = user_data; xmlNode *update = data->update; if (!pcmk__xe_is(update, (const char *) xml->name)) { // No match: either not both elements, or different element types return true; } if (!pcmk__str_eq(pcmk__xe_id(xml), pcmk__xe_id(update), pcmk__str_none)) { // No match: ID mismatch return true; } crm_log_xml_trace(xml, "update-match"); crm_log_xml_trace(update, "update-with"); update_xe(NULL, xml, update, data->flags); // Found a match and replaced it; stop traversing tree return false; } /*! 
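/* Illustrative sketch (not part of this patch): deleting and replacing
 * subtrees by building small search/replacement elements, per the matching
 * rules described above. Element names, attributes, and IDs are made up.
 */
static void
drop_and_replace(xmlNode *tree)
{
    xmlNode *search = pcmk__xe_create(NULL, "item");
    xmlNode *replacement = pcmk__xe_create(NULL, "item");

    // Delete the first <item> whose "name" attribute is "old"
    pcmk__xe_set(search, "name", "old");
    if (pcmk__xe_delete_match(tree, search) == ENXIO) {
        crm_trace("No matching element to delete");
    }

    // Replace the first <item> with id="item-1" by a copy of replacement
    pcmk__xe_set(replacement, PCMK_XA_ID, "item-1");
    pcmk__xe_replace_match(tree, replacement);

    pcmk__xml_free(search);
    pcmk__xml_free(replacement);
}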
* \internal * \brief Search an XML tree depth-first and update the first matching element * * "Update" means to merge a source subtree into a target subtree (see * \c update_xe()). * * A match with a node \c node is defined as follows: * * \c node and \p update are both element nodes of the same type. * * \c node and \p update have the same \c PCMK_XA_ID attribute value, or * \c PCMK_XA_ID is unset in both * * \param[in,out] xml XML tree to search * \param[in] update XML to update a matching element with * \param[in] flags Group of enum pcmk__xa_flags * * \return Standard Pacemaker return code (specifically, \c pcmk_rc_ok on * successful update and an error code otherwise) */ int pcmk__xe_update_match(xmlNode *xml, xmlNode *update, uint32_t flags) { /* @COMPAT In pcmk__xe_delete_match() and pcmk__xe_replace_match(), we * compare IDs only if the equivalent of the update argument has an ID. * Here, we're stricter: we consider it a mismatch if only one element has * an ID attribute, or if both elements have IDs but they don't match. * * Perhaps we should align the behavior at a major version release. */ struct update_data data = { .update = update, .flags = flags, }; CRM_CHECK((xml != NULL) && (update != NULL), return EINVAL); if (!pcmk__xml_tree_foreach(xml, update_xe_if_matching, &data)) { // Found and updated an element return pcmk_rc_ok; } // No match found in this subtree return ENXIO; } void pcmk__xe_set_propv(xmlNodePtr node, va_list pairs) { while (true) { const char *name, *value; name = va_arg(pairs, const char *); if (name == NULL) { return; } value = va_arg(pairs, const char *); pcmk__xe_set(node, name, value); } } void pcmk__xe_set_props(xmlNodePtr node, ...) { va_list pairs; va_start(pairs, node); pcmk__xe_set_propv(node, pairs); va_end(pairs); } int pcmk__xe_foreach_child(xmlNode *xml, const char *child_element_name, int (*handler)(xmlNode *xml, void *userdata), void *userdata) { xmlNode *children = (xml? xml->children : NULL); pcmk__assert(handler != NULL); for (xmlNode *node = children; node != NULL; node = node->next) { if ((node->type == XML_ELEMENT_NODE) && ((child_element_name == NULL) || pcmk__xe_is(node, child_element_name))) { int rc = handler(node, userdata); if (rc != pcmk_rc_ok) { return rc; } } } return pcmk_rc_ok; } // XML attribute handling /*! * \internal * \brief Retrieve the value of an XML attribute * * \param[in] xml XML element whose attribute to get * \param[in] attr_name Attribute name * * \return Value of specified attribute (may be \c NULL) */ const char * pcmk__xe_get(const xmlNode *xml, const char *attr_name) { xmlAttr *attr = NULL; CRM_CHECK((xml != NULL) && (attr_name != NULL), return NULL); attr = xmlHasProp(xml, (const xmlChar *) attr_name); if ((attr == NULL) || (attr->children == NULL)) { return NULL; } return (const char *) attr->children->content; } /*! * \internal * \brief Set an XML attribute value * * This also performs change tracking if enabled and checks ACLs if enabled. * Upon ACL denial, the attribute is not updated. * * \param[in,out] xml XML element whose attribute to set * \param[in] attr_name Attribute name * \param[in] value Attribute value to set * * \return Standard Pacemaker return code * * \note This does nothing and returns \c pcmk_rc_ok if \p value is \c NULL. 
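/* Illustrative sketch (not part of this patch): counting child elements of a
 * given type with pcmk__xe_foreach_child(). The element name is hypothetical.
 */
static int
count_child(xmlNode *child, void *userdata)
{
    int *count = userdata;

    (*count)++;
    return pcmk_rc_ok;   // Returning anything else would stop the iteration
}

static int
count_items(xmlNode *parent)
{
    int count = 0;

    pcmk__xe_foreach_child(parent, "item", count_child, &count);
    return count;
}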
*/ int pcmk__xe_set(xmlNode *xml, const char *attr_name, const char *value) { bool dirty = false; xmlAttr *attr = NULL; CRM_CHECK((xml != NULL) && (attr_name != NULL), return EINVAL); if (value == NULL) { return pcmk_rc_ok; } /* @TODO Can we return early if dirty is false? Or does anything rely on * "cleanly" resetting the value? If so, try to preserve existing behavior * for public API functions that depend on this. */ if (pcmk__xml_doc_all_flags_set(xml->doc, pcmk__xf_tracking)) { const char *old_value = pcmk__xe_get(xml, attr_name); if (!pcmk__str_eq(old_value, value, pcmk__str_none)) { dirty = true; } } if (dirty && !pcmk__check_acl(xml, attr_name, pcmk__xf_acl_create)) { crm_trace("Cannot add %s=%s to %s", attr_name, value, (const char *) xml->name); return EACCES; } attr = xmlSetProp(xml, (const xmlChar *) attr_name, (const xmlChar *) value); // These should never be NULL -- out of memory? CRM_CHECK((attr != NULL) && (attr->children != NULL) && (attr->children->content != NULL), xmlRemoveProp(attr); return ENXIO); /* If the attribute already exists, this does nothing. Attribute values * don't get private data. */ pcmk__xml_new_private_data((xmlNode *) attr); if (dirty) { pcmk__mark_xml_attr_dirty(attr); } return pcmk_rc_ok; } /*! * \internal * \brief Retrieve a flag group from an XML attribute value * * This is like \c pcmk__xe_get() but returns the value as a \c uint32_t. * * \param[in] xml XML node to check * \param[in] name Attribute name to check (must not be NULL) * \param[out] dest Where to store flags (may be NULL to just * validate type) * \param[in] default_value What to use for missing or invalid value * * \return Standard Pacemaker return code */ int pcmk__xe_get_flags(const xmlNode *xml, const char *name, uint32_t *dest, uint32_t default_value) { const char *value = NULL; long long value_ll = 0LL; int rc = pcmk_rc_ok; if (dest != NULL) { *dest = default_value; } if (name == NULL) { return EINVAL; } if (xml == NULL) { return pcmk_rc_ok; } value = pcmk__xe_get(xml, name); if (value == NULL) { return pcmk_rc_ok; } rc = pcmk__scan_ll(value, &value_ll, default_value); if ((value_ll < 0) || (value_ll > UINT32_MAX)) { value_ll = default_value; if (rc == pcmk_rc_ok) { rc = pcmk_rc_bad_input; } } if (dest != NULL) { *dest = (uint32_t) value_ll; } return rc; } /*! * \internal * \brief Retrieve a \c guint value from an XML attribute * * This is like \c pcmk__xe_get() but returns the value as a \c guint. * * \param[in] xml XML element whose attribute to get * \param[in] attr Attribute name * \param[out] dest Where to store attribute value (unchanged on error) * * \return Standard Pacemaker return code */ int pcmk__xe_get_guint(const xmlNode *xml, const char *attr, guint *dest) { long long value_ll = 0; int rc = pcmk_rc_ok; CRM_CHECK((xml != NULL) && (attr != NULL) && (dest != NULL), return EINVAL); rc = pcmk__xe_get_ll(xml, attr, &value_ll); if (rc != pcmk_rc_ok) { return rc; } if ((value_ll < 0) || (value_ll > G_MAXUINT)) { return ERANGE; } *dest = (guint) value_ll; return pcmk_rc_ok; } /*! * \internal * \brief Set an XML attribute using a \c guint value * * This is like \c pcmk__xe_set() but takes a \c guint. * * \param[in,out] xml XML node to modify * \param[in] attr Attribute name * \param[in] value Attribute value to set */ void pcmk__xe_set_guint(xmlNode *xml, const char *attr, guint value) { char *value_s = NULL; CRM_CHECK((xml != NULL) && (attr != NULL), return); value_s = pcmk__assert_asprintf("%u", value); pcmk__xe_set(xml, attr, value_s); free(value_s); } /*! 
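/* Illustrative sketch (not part of this patch): reading a flag-group attribute
 * with a fallback default via pcmk__xe_get_flags(). The attribute name and
 * default are hypothetical; on any error, the default is left in place.
 */
static uint32_t
get_call_options(const xmlNode *xml)
{
    uint32_t options = 0U;
    int rc = pcmk__xe_get_flags(xml, "call-options", &options, 0U);

    if (rc != pcmk_rc_ok) {
        crm_trace("Using default call options: %s", pcmk_rc_str(rc));
    }
    return options;
}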
* \internal * \brief Retrieve an \c int value from an XML attribute * * This is like \c pcmk__xe_get() but returns the value as an \c int. * * \param[in] xml XML element whose attribute to get * \param[in] attr Attribute name * \param[out] dest Where to store element value (unchanged on error) * * \return Standard Pacemaker return code */ int pcmk__xe_get_int(const xmlNode *xml, const char *attr, int *dest) { long long value_ll = 0; int rc = pcmk_rc_ok; CRM_CHECK((xml != NULL) && (attr != NULL) && (dest != NULL), return EINVAL); rc = pcmk__xe_get_ll(xml, attr, &value_ll); if (rc != pcmk_rc_ok) { return rc; } if ((value_ll < INT_MIN) || (value_ll > INT_MAX)) { return ERANGE; } *dest = (int) value_ll; return pcmk_rc_ok; } /*! * \internal * \brief Set an XML attribute using an \c int value. * * This is like \c pcmk__xe_set() but takes an \c int. * * \param[in,out] xml XML node to modify * \param[in] attr Attribute name * \param[in] value Attribute value to set */ void pcmk__xe_set_int(xmlNode *xml, const char *attr, int value) { char *value_s = NULL; CRM_CHECK((xml != NULL) && (attr != NULL), return); value_s = pcmk__itoa(value); pcmk__xe_set(xml, attr, value_s); free(value_s); } /*! * \internal * \brief Retrieve a long long value from an XML attribute * * This is like \c pcmk__xe_get() but returns the value as a long long. * * \param[in] xml XML element whose attribute to get * \param[in] attr Attribute name * \param[out] dest Where to store element value (unchanged on error) * * \return Standard Pacemaker return code */ int pcmk__xe_get_ll(const xmlNode *xml, const char *attr, long long *dest) { const char *value = NULL; long long value_ll = 0; int rc = pcmk_rc_ok; CRM_CHECK((xml != NULL) && (attr != NULL) && (dest != NULL), return EINVAL); value = pcmk__xe_get(xml, attr); if (value == NULL) { return ENXIO; } rc = pcmk__scan_ll(value, &value_ll, PCMK__PARSE_INT_DEFAULT); if (rc != pcmk_rc_ok) { return rc; } *dest = value_ll; return pcmk_rc_ok; } /*! * \internal * \brief Set an XML attribute using a long long value * * This is like \c pcmk__xe_set() but takes a long long. * * \param[in,out] xml XML node to modify * \param[in] attr Attribute name * \param[in] value Attribute value to set * * \return Standard Pacemaker return code */ int pcmk__xe_set_ll(xmlNode *xml, const char *attr, long long value) { char *value_s = NULL; int rc = pcmk_rc_ok; CRM_CHECK((xml != NULL) && (attr != NULL), return EINVAL); value_s = pcmk__assert_asprintf("%lld", value); rc = pcmk__xe_set(xml, attr, value_s); free(value_s); return rc; } /*! * \internal * \brief Retrieve a \c time_t value from an XML attribute * * This is like \c pcmk__xe_get() but returns the value as a \c time_t. * * \param[in] xml XML element whose attribute to get * \param[in] attr Attribute name * \param[out] dest Where to store attribute value (unchanged on error) * * \return Standard Pacemaker return code */ int pcmk__xe_get_time(const xmlNode *xml, const char *attr, time_t *dest) { long long value_ll = 0; int rc = pcmk_rc_ok; CRM_CHECK((xml != NULL) && (attr != NULL) && (dest != NULL), return EINVAL); rc = pcmk__xe_get_ll(xml, attr, &value_ll); if (rc != pcmk_rc_ok) { return rc; } /* We don't do any bounds checking, since there are no constants provided * for the bounds of time_t, and calculating them isn't worth the effort. If * there are XML values beyond the native sizes, there will likely be worse * problems anyway. */ *dest = (time_t) value_ll; return pcmk_rc_ok; } /*! 
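/* Illustrative sketch (not part of this patch): parsing an integer attribute
 * and distinguishing "missing" from "out of range" with the getters above.
 * The attribute name and fallback are hypothetical; the destination is left
 * unchanged on error, so it keeps the fallback.
 */
static int
get_timeout(const xmlNode *xml, int fallback)
{
    int timeout = fallback;
    int rc = pcmk__xe_get_int(xml, "timeout", &timeout);

    if (rc == ENXIO) {
        crm_trace("No timeout attribute; using %d", fallback);
    } else if (rc == ERANGE) {
        crm_trace("Timeout out of int range; using %d", fallback);
    }
    return timeout;
}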
* \internal * \brief Set an XML attribute using a \c time_t value * * This is like \c pcmk__xe_set() but takes a \c time_t. * * \param[in,out] xml XML element whose attribute to set * \param[in] attr Attribute name * \param[in] value Attribute value to set (in seconds) */ void pcmk__xe_set_time(xmlNode *xml, const char *attr, time_t value) { // Could be inline, but keep it underneath pcmk__xe_get_time() CRM_CHECK((xml != NULL) && (attr != NULL), return); pcmk__xe_set_ll(xml, attr, (long long) value); } /*! * \internal * \brief Retrieve the values of XML second/microsecond attributes as time * * This is like \c pcmk__xe_get() but returns the value as a * struct timeval. * * \param[in] xml XML element whose attributes to get * \param[in] sec_attr Name of XML attribute for seconds * \param[in] usec_attr Name of XML attribute for microseconds * \param[out] dest Where to store result (unchanged on error) * * \return Standard Pacemaker return code */ int pcmk__xe_get_timeval(const xmlNode *xml, const char *sec_attr, const char *usec_attr, struct timeval *dest) { long long value_ll = 0; struct timeval result = { 0, 0 }; int rc = pcmk_rc_ok; // Could allow one of sec_attr and usec_attr to be NULL in the future CRM_CHECK((xml != NULL) && (sec_attr != NULL) && (usec_attr != NULL) && (dest != NULL), return EINVAL); // No bounds checking; see comment in pcmk__xe_get_time() // Parse seconds rc = pcmk__xe_get_time(xml, sec_attr, &(result.tv_sec)); if (rc != pcmk_rc_ok) { return rc; } // Parse microseconds rc = pcmk__xe_get_ll(xml, usec_attr, &value_ll); if (rc != pcmk_rc_ok) { return rc; } result.tv_usec = (suseconds_t) value_ll; *dest = result; return pcmk_rc_ok; } /*! * \internal * \brief Set XML attribute values for seconds and microseconds * * This is like \c pcmk__xe_set() but takes a struct timeval *. * * \param[in,out] xml XML element whose attributes to set * \param[in] sec_attr Name of XML attribute for seconds * \param[in] usec_attr Name of XML attribute for microseconds * \param[in] value Attribute values to set * * \note This does nothing if \p value is \c NULL. */ void pcmk__xe_set_timeval(xmlNode *xml, const char *sec_attr, const char *usec_attr, const struct timeval *value) { CRM_CHECK((xml != NULL) && (sec_attr != NULL) && (usec_attr != NULL), return); if (value == NULL) { return; } if (pcmk__xe_set_ll(xml, sec_attr, (long long) value->tv_sec) != pcmk_rc_ok) { return; } /* Seconds were added successfully. Ignore any errors adding microseconds. * * It would be nice to make this atomic: revert the seconds attribute if * adding the microseconds attribute fails. That's somewhat complicated due * to change tracking: the chain of parents is already marked dirty, etc. In * practice, microseconds should succeed if seconds succeeded, unless it's * due to memory allocation failure. Nothing checks the return values of * these setter functions at time of writing, anyway. */ pcmk__xe_set_ll(xml, usec_attr, (long long) value->tv_usec); } /*! * \internal * \brief Get a date/time object from an XML attribute value * * \param[in] xml XML with attribute to parse (from CIB) * \param[in] attr Name of attribute to parse * \param[out] t Where to create date/time object * (\p *t must be NULL initially) * * \return Standard Pacemaker return code * \note The caller is responsible for freeing \p *t using crm_time_free(). 
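/* Illustrative sketch (not part of this patch): storing and re-reading a
 * struct timeval with the second/microsecond helpers above. The attribute
 * names and the sample timestamp are hypothetical.
 */
static void
timeval_example(xmlNode *xml)
{
    struct timeval stamp = { .tv_sec = 1700000000, .tv_usec = 500000 };
    struct timeval parsed = { 0, 0 };

    pcmk__xe_set_timeval(xml, "last-run-sec", "last-run-usec", &stamp);

    if (pcmk__xe_get_timeval(xml, "last-run-sec", "last-run-usec",
                             &parsed) == pcmk_rc_ok) {
        crm_trace("Recorded %lld.%06lld", (long long) parsed.tv_sec,
                  (long long) parsed.tv_usec);
    }
}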
*/ int pcmk__xe_get_datetime(const xmlNode *xml, const char *attr, crm_time_t **t) { const char *value = NULL; if ((t == NULL) || (*t != NULL) || (xml == NULL) || (attr == NULL)) { return EINVAL; } value = pcmk__xe_get(xml, attr); if (value != NULL) { *t = crm_time_new(value); if (*t == NULL) { return pcmk_rc_unpack_error; } } return pcmk_rc_ok; } -/*! - * \internal - * \brief Add a boolean attribute to an XML node. - * - * \param[in,out] node XML node to add attributes to - * \param[in] name XML attribute to create - * \param[in] value Value to give to the attribute - */ -void -pcmk__xe_set_bool_attr(xmlNodePtr node, const char *name, bool value) -{ - pcmk__xe_set(node, name, pcmk__btoa(value)); -} - /*! * \internal * \brief Retrieve a boolean value from an XML attribute * * This is like \c pcmk__xe_get() but returns the value as a \c bool. * * \param[in] xml XML element whose attribute to get * \param[in] attr Attribute name * \param[out] dest Where to store result (unchanged on error) * * \return Standard Pacemaker return code */ int pcmk__xe_get_bool(const xmlNode *xml, const char *attr, bool *dest) { const char *xml_value = NULL; CRM_CHECK((xml != NULL) && (attr != NULL) && (dest != NULL), return EINVAL); xml_value = pcmk__xe_get(xml, attr); if (xml_value == NULL) { return ENXIO; } return pcmk__parse_bool(xml_value, dest); } +/*! + * \internal + * \brief Set an XML attribute using a boolean value + * + * This is like \c pcmk__xe_set() but takes a \c bool. + * + * \param[in,out] xml XML element whose attribute to set + * \param[in] attr Attribute name + * \param[in] value Attribute value to set + * + * \return Standard Pacemaker return code + */ +void +pcmk__xe_set_bool(xmlNode *xml, const char *attr, bool value) +{ + pcmk__xe_set(xml, attr, pcmk__btoa(value)); +} + /*! * \internal * \brief Extract a boolean attribute's value from an XML element * * \param[in] node XML node to get attribute from * \param[in] name XML attribute to get * * \return True if the given \p name is an attribute on \p node and has * the value \c PCMK_VALUE_TRUE, False in all other cases */ bool pcmk__xe_attr_is_true(const xmlNode *node, const char *name) { bool value = false; int rc; rc = pcmk__xe_get_bool(node, name, &value); return rc == pcmk_rc_ok && value == true; } // Deprecated functions kept only for backward API compatibility // LCOV_EXCL_START #include // gboolean, GSList #include // pcmk_xml_attrs2nvpairs(), etc. #include // crm_xml_sanitize_id() #include xmlNode * expand_idref(xmlNode *input, xmlNode *top) { return pcmk__xe_resolve_idref(input, top); } const char * crm_xml_add(xmlNode *node, const char *name, const char *value) { bool dirty = FALSE; xmlAttr *attr = NULL; CRM_CHECK(node != NULL, return NULL); CRM_CHECK(name != NULL, return NULL); if (value == NULL) { return NULL; } if (pcmk__xml_doc_all_flags_set(node->doc, pcmk__xf_tracking)) { const char *old = pcmk__xe_get(node, name); if (old == NULL || value == NULL || strcmp(old, value) != 0) { dirty = TRUE; } } if (dirty && (pcmk__check_acl(node, name, pcmk__xf_acl_create) == FALSE)) { crm_trace("Cannot add %s=%s to %s", name, value, node->name); return NULL; } attr = xmlSetProp(node, (const xmlChar *) name, (const xmlChar *) value); /* If the attribute already exists, this does nothing. Attribute values * don't get private data. 
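/* Illustrative sketch (not part of this patch): round-tripping a boolean
 * attribute with the renamed setter and the existing getters above. The
 * attribute name is hypothetical.
 */
static void
bool_attr_example(xmlNode *xml)
{
    bool value = false;

    pcmk__xe_set_bool(xml, "managed", true);

    if ((pcmk__xe_get_bool(xml, "managed", &value) == pcmk_rc_ok) && value) {
        crm_trace("Attribute parsed as true");
    }

    // Equivalent convenience check
    if (pcmk__xe_attr_is_true(xml, "managed")) {
        crm_trace("Attribute is the literal true value");
    }
}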
*/ pcmk__xml_new_private_data((xmlNode *) attr); if (dirty) { pcmk__mark_xml_attr_dirty(attr); } CRM_CHECK(attr && attr->children && attr->children->content, return NULL); return (char *)attr->children->content; } void crm_xml_set_id(xmlNode *xml, const char *format, ...) { va_list ap; int len = 0; char *id = NULL; // Equivalent to pcmk__assert_asprintf() va_start(ap, format); len = vasprintf(&id, format, ap); va_end(ap); pcmk__assert(len > 0); crm_xml_sanitize_id(id); crm_xml_add(xml, PCMK_XA_ID, id); free(id); } xmlNode * sorted_xml(xmlNode *input, xmlNode *parent, gboolean recursive) { xmlNode *child = NULL; GSList *nvpairs = NULL; xmlNode *result = NULL; CRM_CHECK(input != NULL, return NULL); result = pcmk__xe_create(parent, (const char *) input->name); nvpairs = pcmk_xml_attrs2nvpairs(input); nvpairs = pcmk_sort_nvpairs(nvpairs); pcmk_nvpairs2xml_attrs(nvpairs, result); pcmk_free_nvpairs(nvpairs); for (child = pcmk__xe_first_child(input, NULL, NULL, NULL); child != NULL; child = pcmk__xe_next(child, NULL)) { if (recursive) { sorted_xml(child, result, recursive); } else { pcmk__xml_copy(result, child); } } return result; } const char * crm_element_value(const xmlNode *data, const char *name) { xmlAttr *attr = NULL; if (data == NULL) { crm_err("Couldn't find %s in NULL", name ? name : ""); CRM_LOG_ASSERT(data != NULL); return NULL; } else if (name == NULL) { crm_err("Couldn't find NULL in %s", data->name); return NULL; } attr = xmlHasProp(data, (const xmlChar *) name); if (!attr || !attr->children) { return NULL; } return (const char *) attr->children->content; } const char * crm_copy_xml_element(const xmlNode *obj1, xmlNode *obj2, const char *element) { const char *value = pcmk__xe_get(obj1, element); crm_xml_add(obj2, element, value); return value; } int crm_element_value_ll(const xmlNode *data, const char *name, long long *dest) { const char *value = NULL; CRM_CHECK(dest != NULL, return -1); value = pcmk__xe_get(data, name); if (value != NULL) { int rc = pcmk__scan_ll(value, dest, PCMK__PARSE_INT_DEFAULT); if (rc == pcmk_rc_ok) { return 0; } crm_warn("Using default for %s " "because '%s' is not a valid integer: %s", name, value, pcmk_rc_str(rc)); } return -1; } int crm_element_value_timeval(const xmlNode *xml, const char *name_sec, const char *name_usec, struct timeval *dest) { long long value_i = 0; CRM_CHECK(dest != NULL, return -EINVAL); dest->tv_sec = 0; dest->tv_usec = 0; if (xml == NULL) { return pcmk_ok; } // No bounds checking; see comment in pcmk__xe_get_time() // Parse seconds errno = 0; if (crm_element_value_ll(xml, name_sec, &value_i) < 0) { return -errno; } dest->tv_sec = (time_t) value_i; // Parse microseconds if (crm_element_value_ll(xml, name_usec, &value_i) < 0) { return -errno; } dest->tv_usec = (suseconds_t) value_i; return pcmk_ok; } int crm_element_value_epoch(const xmlNode *xml, const char *name, time_t *dest) { long long value_ll = 0; if (crm_element_value_ll(xml, name, &value_ll) < 0) { return -1; } // No bounds checking; see comment in pcmk__xe_get_time() *dest = (time_t) value_ll; return pcmk_ok; } int crm_element_value_ms(const xmlNode *data, const char *name, guint *dest) { const char *value = NULL; long long value_ll; int rc = pcmk_rc_ok; CRM_CHECK(dest != NULL, return -1); *dest = 0; value = pcmk__xe_get(data, name); rc = pcmk__scan_ll(value, &value_ll, 0LL); if (rc != pcmk_rc_ok) { crm_warn("Using default for %s " "because '%s' is not valid milliseconds: %s", name, value, pcmk_rc_str(rc)); return -1; } if ((value_ll < 0) || (value_ll > G_MAXUINT)) { 
crm_warn("Using default for %s because '%s' is out of range", name, value); return -1; } *dest = (guint) value_ll; return pcmk_ok; } int crm_element_value_int(const xmlNode *data, const char *name, int *dest) { const char *value = NULL; CRM_CHECK(dest != NULL, return -1); value = pcmk__xe_get(data, name); if (value) { long long value_ll; int rc = pcmk__scan_ll(value, &value_ll, 0LL); *dest = PCMK__PARSE_INT_DEFAULT; if (rc != pcmk_rc_ok) { crm_warn("Using default for %s " "because '%s' is not a valid integer: %s", name, value, pcmk_rc_str(rc)); } else if ((value_ll < INT_MIN) || (value_ll > INT_MAX)) { crm_warn("Using default for %s because '%s' is out of range", name, value); } else { *dest = (int) value_ll; return 0; } } return -1; } char * crm_element_value_copy(const xmlNode *data, const char *name) { CRM_CHECK((data != NULL) && (name != NULL), return NULL); return pcmk__str_copy(pcmk__xe_get(data, name)); } const char * crm_xml_add_ll(xmlNode *xml, const char *name, long long value) { char *str = pcmk__assert_asprintf("%lld", value); const char *result = crm_xml_add(xml, name, str); free(str); return result; } const char * crm_xml_add_timeval(xmlNode *xml, const char *name_sec, const char *name_usec, const struct timeval *value) { const char *added = NULL; if (xml && name_sec && value) { added = crm_xml_add_ll(xml, name_sec, (long long) value->tv_sec); if (added && name_usec) { // Any error is ignored (we successfully added seconds) crm_xml_add_ll(xml, name_usec, (long long) value->tv_usec); } } return added; } const char * crm_xml_add_ms(xmlNode *node, const char *name, guint ms) { char *number = pcmk__assert_asprintf("%u", ms); const char *added = crm_xml_add(node, name, number); free(number); return added; } const char * crm_xml_add_int(xmlNode *node, const char *name, int value) { char *number = pcmk__itoa(value); const char *added = crm_xml_add(node, name, number); free(number); return added; } // LCOV_EXCL_STOP // End deprecated API diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c index 2d687ce1d4..b388a1a74b 100644 --- a/lib/lrmd/lrmd_client.c +++ b/lib/lrmd/lrmd_client.c @@ -1,2677 +1,2677 @@ /* * Copyright 2012-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include #include #include // uint32_t, uint64_t #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // stonith__* #include #include #include #include #include #include #define MAX_TLS_RECV_WAIT 10000 CRM_TRACE_INIT_DATA(lrmd); static int lrmd_api_disconnect(lrmd_t * lrmd); static int lrmd_api_is_connected(lrmd_t * lrmd); /* IPC proxy functions */ int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); static void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg); void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); // GnuTLS client handshake timeout in seconds #define TLS_HANDSHAKE_TIMEOUT 5 static void lrmd_tls_disconnect(lrmd_t * lrmd); static int global_remote_msg_id = 0; static void lrmd_tls_connection_destroy(gpointer userdata); static int add_tls_to_mainloop(lrmd_t *lrmd, bool do_api_handshake); typedef struct lrmd_private_s { uint64_t type; char *token; mainloop_io_t *source; /* IPC parameters */ crm_ipc_t *ipc; pcmk__remote_t *remote; /* Extra TLS parameters */ char *remote_nodename; char *server; int port; pcmk__tls_t *tls; /* while the async connection is occurring, this is the id * of the connection timeout timer. */ int async_timer; int sock; /* since tls requires a round trip across the network for a * request/reply, there are times where we just want to be able * to send a request from the client and not wait around (or even care * about) what the reply is. */ int expected_late_replies; GList *pending_notify; crm_trigger_t *process_notify; crm_trigger_t *handshake_trigger; lrmd_event_callback callback; /* Internal IPC proxy msg passing for remote guests */ void (*proxy_callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg); void *proxy_callback_userdata; char *peer_version; } lrmd_private_t; static int process_lrmd_handshake_reply(xmlNode *reply, lrmd_private_t *native); static void report_async_connection_result(lrmd_t * lrmd, int rc); static lrmd_list_t * lrmd_list_add(lrmd_list_t * head, const char *value) { lrmd_list_t *p, *end; p = pcmk__assert_alloc(1, sizeof(lrmd_list_t)); p->val = strdup(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void lrmd_list_freeall(lrmd_list_t * head) { lrmd_list_t *p; while (head) { char *val = (char *)head->val; p = head->next; free(val); free(head); head = p; } } lrmd_key_value_t * lrmd_key_value_add(lrmd_key_value_t * head, const char *key, const char *value) { lrmd_key_value_t *p, *end; p = pcmk__assert_alloc(1, sizeof(lrmd_key_value_t)); p->key = strdup(key); p->value = strdup(value); end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void lrmd_key_value_freeall(lrmd_key_value_t * head) { lrmd_key_value_t *p; while (head) { p = head->next; free(head->key); free(head->value); free(head); head = p; } } /*! * \brief Create a new lrmd_event_data_t object * * \param[in] rsc_id ID of resource involved in event * \param[in] task Action name * \param[in] interval_ms Action interval * * \return Newly allocated and initialized lrmd_event_data_t * \note This functions asserts on memory errors, so the return value is * guaranteed to be non-NULL. The caller is responsible for freeing the * result with lrmd_free_event(). 
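/* Illustrative sketch (not part of this patch): building a key/value parameter
 * list with the helpers above. The keys and values are made up; whether the
 * caller must free the list with lrmd_key_value_freeall() depends on whether a
 * later API call takes ownership of it.
 */
static lrmd_key_value_t *
build_params(void)
{
    lrmd_key_value_t *params = NULL;

    params = lrmd_key_value_add(params, "ip", "192.0.2.10");
    params = lrmd_key_value_add(params, "cidr_netmask", "24");
    return params;
}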
*/ lrmd_event_data_t * lrmd_new_event(const char *rsc_id, const char *task, guint interval_ms) { lrmd_event_data_t *event = pcmk__assert_alloc(1, sizeof(lrmd_event_data_t)); // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees event->rsc_id = pcmk__str_copy(rsc_id); event->op_type = pcmk__str_copy(task); event->interval_ms = interval_ms; return event; } lrmd_event_data_t * lrmd_copy_event(lrmd_event_data_t * event) { lrmd_event_data_t *copy = NULL; copy = pcmk__assert_alloc(1, sizeof(lrmd_event_data_t)); copy->type = event->type; // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees copy->rsc_id = pcmk__str_copy(event->rsc_id); copy->op_type = pcmk__str_copy(event->op_type); copy->user_data = pcmk__str_copy(event->user_data); copy->output = pcmk__str_copy(event->output); copy->remote_nodename = pcmk__str_copy(event->remote_nodename); copy->exit_reason = pcmk__str_copy(event->exit_reason); copy->call_id = event->call_id; copy->timeout = event->timeout; copy->interval_ms = event->interval_ms; copy->start_delay = event->start_delay; copy->rsc_deleted = event->rsc_deleted; copy->rc = event->rc; copy->op_status = event->op_status; copy->t_run = event->t_run; copy->t_rcchange = event->t_rcchange; copy->exec_time = event->exec_time; copy->queue_time = event->queue_time; copy->connection_rc = event->connection_rc; copy->params = pcmk__str_table_dup(event->params); return copy; } /*! * \brief Free an executor event * * \param[in,out] Executor event object to free */ void lrmd_free_event(lrmd_event_data_t *event) { if (event == NULL) { return; } // @TODO Why are these const char *? free((void *) event->rsc_id); free((void *) event->op_type); free((void *) event->user_data); free((void *) event->remote_nodename); lrmd__reset_result(event); if (event->params != NULL) { g_hash_table_destroy(event->params); } free(event); } static void lrmd_dispatch_internal(gpointer data, gpointer user_data) { xmlNode *msg = data; lrmd_t *lrmd = user_data; const char *type; const char *proxy_session = pcmk__xe_get(msg, PCMK__XA_LRMD_IPC_SESSION); lrmd_private_t *native = lrmd->lrmd_private; lrmd_event_data_t event = { 0, }; if (proxy_session != NULL) { /* this is proxy business */ lrmd_internal_proxy_dispatch(lrmd, msg); return; } else if (!native->callback) { /* no callback set */ crm_trace("notify event received but client has not set callback"); return; } event.remote_nodename = native->remote_nodename; type = pcmk__xe_get(msg, PCMK__XA_LRMD_OP); pcmk__xe_get_int(msg, PCMK__XA_LRMD_CALLID, &event.call_id); event.rsc_id = pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_ID); if (pcmk__str_eq(type, LRMD_OP_RSC_REG, pcmk__str_none)) { event.type = lrmd_event_register; } else if (pcmk__str_eq(type, LRMD_OP_RSC_UNREG, pcmk__str_none)) { event.type = lrmd_event_unregister; } else if (pcmk__str_eq(type, LRMD_OP_RSC_EXEC, pcmk__str_none)) { int rc = 0; int exec_time = 0; int queue_time = 0; pcmk__xe_get_int(msg, PCMK__XA_LRMD_TIMEOUT, &event.timeout); pcmk__xe_get_guint(msg, PCMK__XA_LRMD_RSC_INTERVAL, &event.interval_ms); pcmk__xe_get_int(msg, PCMK__XA_LRMD_RSC_START_DELAY, &event.start_delay); pcmk__xe_get_int(msg, PCMK__XA_LRMD_EXEC_RC, &rc); event.rc = (enum ocf_exitcode) rc; pcmk__xe_get_int(msg, PCMK__XA_LRMD_EXEC_OP_STATUS, &event.op_status); pcmk__xe_get_int(msg, PCMK__XA_LRMD_RSC_DELETED, &event.rsc_deleted); pcmk__xe_get_time(msg, PCMK__XA_LRMD_RUN_TIME, &event.t_run); pcmk__xe_get_time(msg, PCMK__XA_LRMD_RCCHANGE_TIME, &event.t_rcchange); pcmk__xe_get_int(msg, 
PCMK__XA_LRMD_EXEC_TIME, &exec_time); CRM_LOG_ASSERT(exec_time >= 0); event.exec_time = QB_MAX(0, exec_time); pcmk__xe_get_int(msg, PCMK__XA_LRMD_QUEUE_TIME, &queue_time); CRM_LOG_ASSERT(queue_time >= 0); event.queue_time = QB_MAX(0, queue_time); event.op_type = pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_ACTION); event.user_data = pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_USERDATA_STR); event.type = lrmd_event_exec_complete; /* output and exit_reason may be freed by a callback */ event.output = pcmk__xe_get_copy(msg, PCMK__XA_LRMD_RSC_OUTPUT); lrmd__set_result(&event, event.rc, event.op_status, pcmk__xe_get(msg, PCMK__XA_LRMD_RSC_EXIT_REASON)); event.params = xml2list(msg); } else if (pcmk__str_eq(type, LRMD_OP_NEW_CLIENT, pcmk__str_none)) { event.type = lrmd_event_new_client; } else if (pcmk__str_eq(type, LRMD_OP_POKE, pcmk__str_none)) { event.type = lrmd_event_poke; } else { return; } crm_trace("op %s notify event received", type); native->callback(&event); if (event.params) { g_hash_table_destroy(event.params); } lrmd__reset_result(&event); } // \return Always 0, to indicate that IPC mainloop source should be kept static int lrmd_ipc_dispatch(const char *buffer, ssize_t length, gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; if (native->callback != NULL) { xmlNode *msg = pcmk__xml_parse(buffer); lrmd_dispatch_internal(msg, lrmd); pcmk__xml_free(msg); } return 0; } static void lrmd_free_xml(gpointer userdata) { pcmk__xml_free((xmlNode *) userdata); } static bool remote_executor_connected(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; return (native->remote->tls_session != NULL); } static void handle_remote_msg(xmlNode *xml, lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; const char *msg_type = NULL; msg_type = pcmk__xe_get(xml, PCMK__XA_LRMD_REMOTE_MSG_TYPE); if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) { lrmd_dispatch_internal(xml, lrmd); } else if (pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) { const char *op = pcmk__xe_get(xml, PCMK__XA_LRMD_OP); if (native->expected_late_replies > 0) { native->expected_late_replies--; /* The register op message we get as a response to lrmd_handshake_async * is a reply, so we have to handle that here. */ if (pcmk__str_eq(op, "register", pcmk__str_casei)) { int rc = process_lrmd_handshake_reply(xml, native); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); } } else { int reply_id = 0; pcmk__xe_get_int(xml, PCMK__XA_LRMD_CALLID, &reply_id); /* if this happens, we want to know about it */ crm_err("Got outdated Pacemaker Remote reply %d", reply_id); } } } /*! * \internal * \brief Notify trigger handler * * \param[in,out] userdata API connection * * \return Always return G_SOURCE_CONTINUE to leave this trigger handler in the * mainloop */ static int process_pending_notifies(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; if (native->pending_notify == NULL) { return G_SOURCE_CONTINUE; } crm_trace("Processing pending notifies"); g_list_foreach(native->pending_notify, lrmd_dispatch_internal, lrmd); g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; return G_SOURCE_CONTINUE; } /*! 
* \internal * \brief TLS dispatch function for file descriptor sources * * \param[in,out] userdata API connection * * \return -1 on error to remove the source from the mainloop, or 0 otherwise * to leave it in the mainloop */ static int lrmd_tls_dispatch(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *xml = NULL; int rc = pcmk_rc_ok; if (!remote_executor_connected(lrmd)) { crm_trace("TLS dispatch triggered after disconnect"); return -1; } crm_trace("TLS dispatch triggered"); rc = pcmk__remote_ready(native->remote, 0); if (rc == pcmk_rc_ok) { rc = pcmk__read_remote_message(native->remote, -1); } if (rc != pcmk_rc_ok && rc != ETIME) { crm_info("Lost %s executor connection while reading data", (native->remote_nodename? native->remote_nodename : "local")); lrmd_tls_disconnect(lrmd); return -1; } /* If rc is ETIME, there was nothing to read but we may already have a * full message in the buffer */ xml = pcmk__remote_message_xml(native->remote); if (xml == NULL) { return 0; } handle_remote_msg(xml, lrmd); pcmk__xml_free(xml); return 0; } /* Not used with mainloop */ int lrmd_poll(lrmd_t * lrmd, int timeout) { lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: return crm_ipc_ready(native->ipc); case pcmk__client_tls: if (native->pending_notify) { return 1; } else { int rc = pcmk__remote_ready(native->remote, 0); switch (rc) { case pcmk_rc_ok: return 1; case ETIME: return 0; default: return pcmk_rc2legacy(rc); } } default: crm_err("Unsupported executor connection type (bug?): %d", native->type); return -EPROTONOSUPPORT; } } /* Not used with mainloop */ bool lrmd_dispatch(lrmd_t * lrmd) { lrmd_private_t *private = NULL; pcmk__assert(lrmd != NULL); private = lrmd->lrmd_private; switch (private->type) { case pcmk__client_ipc: while (crm_ipc_ready(private->ipc)) { if (crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); lrmd_ipc_dispatch(msg, strlen(msg), lrmd); pcmk__ipc_free_client_buffer(private->ipc); } } break; case pcmk__client_tls: lrmd_tls_dispatch(lrmd); break; default: crm_err("Unsupported executor connection type (bug?): %d", private->type); } if (lrmd_api_is_connected(lrmd) == FALSE) { crm_err("Connection closed"); return FALSE; } return TRUE; } static xmlNode * lrmd_create_op(const char *token, const char *op, xmlNode *data, int timeout, enum lrmd_call_options options) { xmlNode *op_msg = NULL; CRM_CHECK(token != NULL, return NULL); op_msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_COMMAND); pcmk__xe_set(op_msg, PCMK__XA_T, PCMK__VALUE_LRMD); pcmk__xe_set(op_msg, PCMK__XA_LRMD_OP, op); pcmk__xe_set_int(op_msg, PCMK__XA_LRMD_TIMEOUT, timeout); pcmk__xe_set_int(op_msg, PCMK__XA_LRMD_CALLOPT, options); if (data != NULL) { xmlNode *wrapper = pcmk__xe_create(op_msg, PCMK__XE_LRMD_CALLDATA); pcmk__xml_copy(wrapper, data); } crm_trace("Created executor %s command with call options %.8lx (%d)", op, (long)options, options); return op_msg; } static void lrmd_ipc_connection_destroy(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: crm_info("Disconnected from local executor"); break; case pcmk__client_tls: crm_info("Disconnected from remote executor on %s", native->remote_nodename); break; default: crm_err("Unsupported executor connection type %d (bug?)", native->type); } /* Prevent these from being cleaned up in lrmd_api_disconnect() */ native->ipc = NULL; native->source = NULL; if 
(native->callback) { lrmd_event_data_t event = { 0, }; event.type = lrmd_event_disconnect; event.remote_nodename = native->remote_nodename; native->callback(&event); } } static void lrmd_tls_connection_destroy(gpointer userdata) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; crm_info("TLS connection destroyed"); if (native->remote->tls_session) { gnutls_bye(native->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(native->remote->tls_session); native->remote->tls_session = NULL; } if (native->tls) { pcmk__free_tls(native->tls); native->tls = NULL; } if (native->sock >= 0) { close(native->sock); } if (native->process_notify) { mainloop_destroy_trigger(native->process_notify); native->process_notify = NULL; } if (native->pending_notify) { g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; } if (native->handshake_trigger != NULL) { mainloop_destroy_trigger(native->handshake_trigger); native->handshake_trigger = NULL; } free(native->remote->buffer); free(native->remote->start_state); native->remote->buffer = NULL; native->remote->start_state = NULL; native->source = 0; native->sock = -1; if (native->callback) { lrmd_event_data_t event = { 0, }; event.remote_nodename = native->remote_nodename; event.type = lrmd_event_disconnect; native->callback(&event); } return; } // \return Standard Pacemaker return code int lrmd__remote_send_xml(pcmk__remote_t *session, xmlNode *msg, uint32_t id, const char *msg_type) { pcmk__xe_set_int(msg, PCMK__XA_LRMD_REMOTE_MSG_ID, id); pcmk__xe_set(msg, PCMK__XA_LRMD_REMOTE_MSG_TYPE, msg_type); return pcmk__remote_send_xml(session, msg); } // \return Standard Pacemaker return code static int read_remote_reply(lrmd_t *lrmd, int total_timeout, int expected_reply_id, xmlNode **reply) { lrmd_private_t *native = lrmd->lrmd_private; time_t start = time(NULL); const char *msg_type = NULL; int reply_id = 0; int remaining_timeout = 0; int rc = pcmk_rc_ok; /* A timeout of 0 here makes no sense. We have to wait a period of time * for the response to come back. If -1 or 0, default to 10 seconds. */ if (total_timeout <= 0 || total_timeout > MAX_TLS_RECV_WAIT) { total_timeout = MAX_TLS_RECV_WAIT; } for (*reply = NULL; *reply == NULL; ) { *reply = pcmk__remote_message_xml(native->remote); if (*reply == NULL) { /* read some more off the tls buffer if we still have time left. 
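 *
 * On the first pass remaining_timeout is still 0, so the full total_timeout
 * is used; on later passes it is recomputed from the wall-clock time elapsed
 * since start (converted to milliseconds).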
*/ if (remaining_timeout) { remaining_timeout = total_timeout - ((time(NULL) - start) * 1000); } else { remaining_timeout = total_timeout; } if (remaining_timeout <= 0) { return ETIME; } rc = pcmk__read_remote_message(native->remote, remaining_timeout); if (rc != pcmk_rc_ok) { return rc; } *reply = pcmk__remote_message_xml(native->remote); if (*reply == NULL) { return ENOMSG; } } pcmk__xe_get_int(*reply, PCMK__XA_LRMD_REMOTE_MSG_ID, &reply_id); msg_type = pcmk__xe_get(*reply, PCMK__XA_LRMD_REMOTE_MSG_TYPE); if (!msg_type) { crm_err("Empty msg type received while waiting for reply"); pcmk__xml_free(*reply); *reply = NULL; } else if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) { /* got a notify while waiting for reply, trigger the notify to be processed later */ crm_info("queueing notify"); native->pending_notify = g_list_append(native->pending_notify, *reply); if (native->process_notify) { crm_info("notify trigger set."); mainloop_set_trigger(native->process_notify); } *reply = NULL; } else if (!pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) { /* msg isn't a reply, make some noise */ crm_err("Expected a reply, got %s", msg_type); pcmk__xml_free(*reply); *reply = NULL; } else if (reply_id != expected_reply_id) { if (native->expected_late_replies > 0) { native->expected_late_replies--; } else { crm_err("Got outdated reply, expected id %d got id %d", expected_reply_id, reply_id); } pcmk__xml_free(*reply); *reply = NULL; } } if (native->remote->buffer && native->process_notify) { mainloop_set_trigger(native->process_notify); } return rc; } // \return Standard Pacemaker return code static int send_remote_message(lrmd_t *lrmd, xmlNode *msg) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; global_remote_msg_id++; if (global_remote_msg_id <= 0) { global_remote_msg_id = 1; } rc = lrmd__remote_send_xml(native->remote, msg, global_remote_msg_id, "request"); if (rc != pcmk_rc_ok) { crm_err("Disconnecting because TLS message could not be sent to " "Pacemaker Remote: %s", pcmk_rc_str(rc)); lrmd_tls_disconnect(lrmd); } return rc; } static int lrmd_tls_send_recv(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply) { int rc = 0; xmlNode *xml = NULL; if (!remote_executor_connected(lrmd)) { return -ENOTCONN; } rc = send_remote_message(lrmd, msg); if (rc != pcmk_rc_ok) { return pcmk_rc2legacy(rc); } rc = read_remote_reply(lrmd, timeout, global_remote_msg_id, &xml); if (rc != pcmk_rc_ok) { crm_err("Disconnecting remote after request %d reply not received: %s " QB_XS " rc=%d timeout=%dms", global_remote_msg_id, pcmk_rc_str(rc), rc, timeout); lrmd_tls_disconnect(lrmd); } if (reply) { *reply = xml; } else { pcmk__xml_free(xml); } return pcmk_rc2legacy(rc); } static int lrmd_send_xml(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: rc = crm_ipc_send(native->ipc, msg, crm_ipc_client_response, timeout, reply); break; case pcmk__client_tls: rc = lrmd_tls_send_recv(lrmd, msg, timeout, reply); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } return rc; } static int lrmd_send_xml_no_reply(lrmd_t * lrmd, xmlNode * msg) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: rc = crm_ipc_send(native->ipc, msg, crm_ipc_flags_none, 0, NULL); break; case pcmk__client_tls: rc = send_remote_message(lrmd, msg); if (rc == pcmk_rc_ok) { /* we don't want 
to wait around for the reply, but * since the request/reply protocol needs to behave the same * as libqb, a reply will eventually come later anyway. */ native->expected_late_replies++; } rc = pcmk_rc2legacy(rc); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } return rc; } static int lrmd_api_is_connected(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: return crm_ipc_connected(native->ipc); case pcmk__client_tls: return remote_executor_connected(lrmd); default: crm_err("Unsupported executor connection type (bug?): %d", native->type); return 0; } } /*! * \internal * \brief Send a prepared API command to the executor * * \param[in,out] lrmd Existing connection to the executor * \param[in] op Name of API command to send * \param[in] data Command data XML to add to the sent command * \param[out] output_data If expecting a reply, it will be stored here * \param[in] timeout Timeout in milliseconds (if 0, defaults to * a sensible value per the type of connection, * standard vs. pacemaker remote); * also propagated to the command XML * \param[in] call_options Call options to pass to server when sending * \param[in] expect_reply If true, wait for a reply from the server; * must be true for IPC (as opposed to TLS) clients * * \return pcmk_ok on success, -errno on error */ static int lrmd_send_command(lrmd_t *lrmd, const char *op, xmlNode *data, xmlNode **output_data, int timeout, enum lrmd_call_options options, bool expect_reply) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; if (!lrmd_api_is_connected(lrmd)) { return -ENOTCONN; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } CRM_LOG_ASSERT(native->token != NULL); crm_trace("Sending %s op to executor", op); op_msg = lrmd_create_op(native->token, op, data, timeout, options); if (op_msg == NULL) { return -EINVAL; } if (expect_reply) { rc = lrmd_send_xml(lrmd, op_msg, timeout, &op_reply); } else { rc = lrmd_send_xml_no_reply(lrmd, op_msg); goto done; } if (rc < 0) { crm_err("Couldn't perform %s operation (timeout=%d): %d", op, timeout, pcmk_strerror(rc)); goto done; } else if (op_reply == NULL) { rc = -ENOMSG; goto done; } rc = pcmk_ok; crm_trace("%s op reply received", op); if (pcmk__xe_get_int(op_reply, PCMK__XA_LRMD_RC, &rc) != pcmk_rc_ok) { rc = -ENOMSG; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (output_data) { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent free */ } done: if (lrmd_api_is_connected(lrmd) == FALSE) { crm_err("Executor disconnected"); } pcmk__xml_free(op_msg); pcmk__xml_free(op_reply); return rc; } static int lrmd_api_poke_connection(lrmd_t * lrmd) { int rc; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); rc = lrmd_send_command(lrmd, LRMD_OP_POKE, data, NULL, 0, 0, (native->type == pcmk__client_ipc)); pcmk__xml_free(data); return rc < 0 ? 
rc : pcmk_ok; } // \return Standard Pacemaker return code int lrmd__validate_remote_settings(lrmd_t *lrmd, GHashTable *hash) { int rc = pcmk_rc_ok; const char *value; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *data = pcmk__xe_create(NULL, PCMK__XA_LRMD_OP); pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); value = g_hash_table_lookup(hash, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT); if ((value) && (stonith__watchdog_fencing_enabled_for_node(native->remote_nodename))) { pcmk__xe_set(data, PCMK__XA_LRMD_WATCHDOG, value); } rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0, (native->type == pcmk__client_ipc)); pcmk__xml_free(data); return (rc < 0)? pcmk_legacy2rc(rc) : pcmk_rc_ok; } static xmlNode * lrmd_handshake_hello_msg(const char *name, bool is_proxy) { xmlNode *hello = pcmk__xe_create(NULL, PCMK__XE_LRMD_COMMAND); pcmk__xe_set(hello, PCMK__XA_T, PCMK__VALUE_LRMD); pcmk__xe_set(hello, PCMK__XA_LRMD_OP, CRM_OP_REGISTER); pcmk__xe_set(hello, PCMK__XA_LRMD_CLIENTNAME, name); pcmk__xe_set(hello, PCMK__XA_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION); /* advertise that we are a proxy provider */ if (is_proxy) { - pcmk__xe_set_bool_attr(hello, PCMK__XA_LRMD_IS_IPC_PROVIDER, true); + pcmk__xe_set_bool(hello, PCMK__XA_LRMD_IS_IPC_PROVIDER, true); } return hello; } static int process_lrmd_handshake_reply(xmlNode *reply, lrmd_private_t *native) { int rc = pcmk_rc_ok; const char *version = pcmk__xe_get(reply, PCMK__XA_LRMD_PROTOCOL_VERSION); const char *msg_type = pcmk__xe_get(reply, PCMK__XA_LRMD_OP); const char *tmp_ticket = pcmk__xe_get(reply, PCMK__XA_LRMD_CLIENTID); const char *start_state = pcmk__xe_get(reply, PCMK__XA_NODE_START_STATE); pcmk__xe_get_int(reply, PCMK__XA_LRMD_RC, &rc); rc = pcmk_legacy2rc(rc); /* The remote executor may add its uptime to the XML reply, which is useful * in handling transient attributes when the connection to the remote node * unexpectedly drops. If no parameter is given, just default to -1. 
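 *
 * The -1 preset below is only overwritten if PCMK__XA_UPTIME is present in
 * (and parses from) the reply.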
*/ native->remote->uptime = -1; pcmk__xe_get_time(reply, PCMK__XA_UPTIME, &native->remote->uptime); if (start_state) { native->remote->start_state = strdup(start_state); } if (rc == EPROTO) { crm_err("Executor protocol version mismatch between client (%s) and server (%s)", LRMD_PROTOCOL_VERSION, version); crm_log_xml_err(reply, "Protocol Error"); } else if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) { crm_err("Invalid registration message: %s", msg_type); crm_log_xml_err(reply, "Bad reply"); rc = EPROTO; } else if (tmp_ticket == NULL) { crm_err("No registration token provided"); crm_log_xml_err(reply, "Bad reply"); rc = EPROTO; } else { crm_trace("Obtained registration token: %s", tmp_ticket); native->token = strdup(tmp_ticket); native->peer_version = strdup(version?version:"1.0"); /* Included since 1.1 */ rc = pcmk_rc_ok; } return rc; } static int lrmd_handshake(lrmd_t * lrmd, const char *name) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *reply = NULL; xmlNode *hello = lrmd_handshake_hello_msg(name, native->proxy_callback != NULL); rc = lrmd_send_xml(lrmd, hello, -1, &reply); if (rc < 0) { crm_debug("Couldn't complete registration with the executor API: %s", pcmk_strerror(rc)); rc = ECOMM; } else if (reply == NULL) { crm_err("Did not receive registration reply"); rc = EPROTO; } else { rc = process_lrmd_handshake_reply(reply, native); } pcmk__xml_free(reply); pcmk__xml_free(hello); if (rc != pcmk_rc_ok) { lrmd_api_disconnect(lrmd); } return rc; } static int lrmd_handshake_async(lrmd_t * lrmd, const char *name) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; xmlNode *hello = lrmd_handshake_hello_msg(name, native->proxy_callback != NULL); rc = send_remote_message(lrmd, hello); if (rc == pcmk_rc_ok) { native->expected_late_replies++; } else { lrmd_api_disconnect(lrmd); } pcmk__xml_free(hello); return rc; } static int lrmd_ipc_connect(lrmd_t * lrmd, int *fd) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; struct ipc_client_callbacks lrmd_callbacks = { .dispatch = lrmd_ipc_dispatch, .destroy = lrmd_ipc_connection_destroy }; crm_info("Connecting to executor"); if (fd) { /* No mainloop */ native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0); if (native->ipc != NULL) { rc = pcmk__connect_generic_ipc(native->ipc); if (rc == pcmk_rc_ok) { rc = pcmk__ipc_fd(native->ipc, fd); } if (rc != pcmk_rc_ok) { crm_err("Connection to executor failed: %s", pcmk_rc_str(rc)); rc = -ENOTCONN; } } } else { native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { crm_debug("Could not connect to the executor API"); rc = -ENOTCONN; } return rc; } static void copy_gnutls_datum(gnutls_datum_t *dest, gnutls_datum_t *source) { pcmk__assert((dest != NULL) && (source != NULL) && (source->data != NULL)); dest->data = gnutls_malloc(source->size); pcmk__mem_assert(dest->data); memcpy(dest->data, source->data, source->size); dest->size = source->size; } static void clear_gnutls_datum(gnutls_datum_t *datum) { gnutls_free(datum->data); datum->data = NULL; datum->size = 0; } #define KEY_READ_LEN 256 // Chunk size for reading key from file // \return Standard Pacemaker return code static int read_gnutls_key(const char *location, gnutls_datum_t *key) { FILE *stream = NULL; size_t buf_len = KEY_READ_LEN; if ((location == NULL) || (key == NULL)) { return EINVAL; } stream = fopen(location, "r"); if (stream == NULL) { return errno; } 
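    /* Read the key a byte at a time, growing the GnuTLS-allocated buffer in
     * KEY_READ_LEN-sized chunks. If reading stops before EOF, a warning is
     * logged below, but whatever was read so far is still returned.
     */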
key->data = gnutls_malloc(buf_len); key->size = 0; while (!feof(stream)) { int next = fgetc(stream); if (next == EOF) { if (!feof(stream)) { crm_warn("Pacemaker Remote key read was partially successful " "(copy in memory may be corrupted)"); } break; } if (key->size == buf_len) { buf_len = key->size + KEY_READ_LEN; key->data = gnutls_realloc(key->data, buf_len); pcmk__assert(key->data); } key->data[key->size++] = (unsigned char) next; } fclose(stream); if (key->size == 0) { clear_gnutls_datum(key); return ENOKEY; } return pcmk_rc_ok; } // Cache the most recently used Pacemaker Remote authentication key struct key_cache_s { time_t updated; // When cached key was read (valid for 1 minute) const char *location; // Where cached key was read from gnutls_datum_t key; // Cached key }; static bool key_is_cached(struct key_cache_s *key_cache) { return key_cache->updated != 0; } static bool key_cache_expired(struct key_cache_s *key_cache) { return (time(NULL) - key_cache->updated) >= 60; } static void clear_key_cache(struct key_cache_s *key_cache) { clear_gnutls_datum(&(key_cache->key)); if ((key_cache->updated != 0) || (key_cache->location != NULL)) { key_cache->updated = 0; key_cache->location = NULL; crm_debug("Cleared Pacemaker Remote key cache"); } } static void get_cached_key(struct key_cache_s *key_cache, gnutls_datum_t *key) { copy_gnutls_datum(key, &(key_cache->key)); crm_debug("Using cached Pacemaker Remote key from %s", pcmk__s(key_cache->location, "unknown location")); } static void cache_key(struct key_cache_s *key_cache, gnutls_datum_t *key, const char *location) { key_cache->updated = time(NULL); key_cache->location = location; copy_gnutls_datum(&(key_cache->key), key); crm_debug("Using (and cacheing) Pacemaker Remote key from %s", pcmk__s(location, "unknown location")); } /*! * \internal * \brief Get Pacemaker Remote authentication key from file or cache * * \param[in] location Path to key file to try (this memory must * persist across all calls of this function) * \param[out] key Key from location or cache * * \return Standard Pacemaker return code */ static int get_remote_key(const char *location, gnutls_datum_t *key) { static struct key_cache_s key_cache = { 0, }; int rc = pcmk_rc_ok; if ((location == NULL) || (key == NULL)) { return EINVAL; } if (key_is_cached(&key_cache)) { if (key_cache_expired(&key_cache)) { clear_key_cache(&key_cache); } else { get_cached_key(&key_cache, key); return pcmk_rc_ok; } } rc = read_gnutls_key(location, key); if (rc != pcmk_rc_ok) { return rc; } cache_key(&key_cache, key, location); return pcmk_rc_ok; } /*! 
* \internal * \brief Initialize the Pacemaker Remote authentication key * * Try loading the Pacemaker Remote authentication key from cache if available, * otherwise from these locations, in order of preference: * * - The value of the PCMK_authkey_location environment variable, if set * - The Pacemaker default key file location * * \param[out] key Where to store key * * \return Standard Pacemaker return code */ int lrmd__init_remote_key(gnutls_datum_t *key) { static const char *env_location = NULL; static bool need_env = true; int rc = pcmk_rc_ok; if (need_env) { env_location = pcmk__env_option(PCMK__ENV_AUTHKEY_LOCATION); need_env = false; } // Try location in environment variable, if set if (env_location != NULL) { rc = get_remote_key(env_location, key); if (rc == pcmk_rc_ok) { return pcmk_rc_ok; } crm_warn("Could not read Pacemaker Remote key from %s: %s", env_location, pcmk_rc_str(rc)); return ENOKEY; } // Try default location, if environment wasn't explicitly set to it rc = get_remote_key(DEFAULT_REMOTE_KEY_LOCATION, key); if (rc == pcmk_rc_ok) { return pcmk_rc_ok; } crm_warn("Could not read Pacemaker Remote key from default location %s: %s", DEFAULT_REMOTE_KEY_LOCATION, pcmk_rc_str(rc)); return ENOKEY; } static void report_async_connection_result(lrmd_t * lrmd, int rc) { lrmd_private_t *native = lrmd->lrmd_private; if (native->callback) { lrmd_event_data_t event = { 0, }; event.type = lrmd_event_connect; event.remote_nodename = native->remote_nodename; event.connection_rc = rc; native->callback(&event); } } static void tls_handshake_failed(lrmd_t *lrmd, int tls_rc, int rc) { lrmd_private_t *native = lrmd->lrmd_private; crm_warn("Disconnecting after TLS handshake with " "Pacemaker Remote server %s:%d failed: %s", native->server, native->port, (rc == EPROTO)? gnutls_strerror(tls_rc) : pcmk_rc_str(rc)); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); gnutls_deinit(native->remote->tls_session); native->remote->tls_session = NULL; lrmd_tls_connection_destroy(lrmd); } static void tls_handshake_succeeded(lrmd_t *lrmd) { int rc = pcmk_rc_ok; lrmd_private_t *native = lrmd->lrmd_private; /* Now that the handshake is done, see if any client TLS certificate is * close to its expiration date and log if so. If a TLS certificate is not * in use, this function will just return so we don't need to check for the * session type here. */ pcmk__tls_check_cert_expiration(native->remote->tls_session); crm_info("TLS connection to Pacemaker Remote server %s:%d succeeded", native->server, native->port); rc = add_tls_to_mainloop(lrmd, true); /* If add_tls_to_mainloop failed, report that right now. Otherwise, we have * to wait until we read the async reply to report anything. */ if (rc != pcmk_rc_ok) { report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); } } /*! * \internal * \brief Perform a TLS client handshake with a Pacemaker Remote server * * \param[in] lrmd Newly established Pacemaker Remote executor connection * * \return Standard Pacemaker return code */ static int tls_client_handshake(lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; int tls_rc = GNUTLS_E_SUCCESS; int rc = pcmk__tls_client_handshake(native->remote, TLS_HANDSHAKE_TIMEOUT, &tls_rc); if (rc != pcmk_rc_ok) { tls_handshake_failed(lrmd, tls_rc, rc); } return rc; } /*! 
* \internal * \brief Add trigger and file descriptor mainloop sources for TLS * * \param[in,out] lrmd API connection with established TLS session * \param[in] do_api_handshake Whether to perform executor handshake * * \return Standard Pacemaker return code */ static int add_tls_to_mainloop(lrmd_t *lrmd, bool do_api_handshake) { lrmd_private_t *native = lrmd->lrmd_private; int rc = pcmk_rc_ok; char *name = pcmk__assert_asprintf("pacemaker-remote-%s:%d", native->server, native->port); struct mainloop_fd_callbacks tls_fd_callbacks = { .dispatch = lrmd_tls_dispatch, .destroy = lrmd_tls_connection_destroy, }; native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH, process_pending_notifies, lrmd); native->source = mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd, &tls_fd_callbacks); /* Async connections lose the client name provided by the API caller, so we * have to use our generated name here to perform the executor handshake. * * @TODO Keep track of the caller-provided name. Perhaps we should be using * that name in this function instead of generating one anyway. */ if (do_api_handshake) { rc = lrmd_handshake_async(lrmd, name); } free(name); return rc; } struct handshake_data_s { lrmd_t *lrmd; time_t start_time; int timeout_sec; }; static gboolean try_handshake_cb(gpointer user_data) { struct handshake_data_s *hs = user_data; lrmd_t *lrmd = hs->lrmd; lrmd_private_t *native = lrmd->lrmd_private; pcmk__remote_t *remote = native->remote; int rc = pcmk_rc_ok; int tls_rc = GNUTLS_E_SUCCESS; if (time(NULL) >= hs->start_time + hs->timeout_sec) { rc = ETIME; tls_handshake_failed(lrmd, GNUTLS_E_TIMEDOUT, rc); free(hs); return 0; } rc = pcmk__tls_client_try_handshake(remote, &tls_rc); if (rc == pcmk_rc_ok) { tls_handshake_succeeded(lrmd); free(hs); return 0; } else if (rc == EAGAIN) { mainloop_set_trigger(native->handshake_trigger); return 1; } else { rc = EKEYREJECTED; tls_handshake_failed(lrmd, tls_rc, rc); free(hs); return 0; } } static void lrmd_tcp_connect_cb(void *userdata, int rc, int sock) { lrmd_t *lrmd = userdata; lrmd_private_t *native = lrmd->lrmd_private; int tls_rc = GNUTLS_E_SUCCESS; bool use_cert = pcmk__x509_enabled(); native->async_timer = 0; if (rc != pcmk_rc_ok) { lrmd_tls_connection_destroy(lrmd); crm_info("Could not connect to Pacemaker Remote at %s:%d: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); return; } /* The TCP connection was successful, so establish the TLS connection. */ native->sock = sock; if (native->tls == NULL) { rc = pcmk__init_tls(&native->tls, false, use_cert ? 
GNUTLS_CRD_CERTIFICATE : GNUTLS_CRD_PSK); if ((rc != pcmk_rc_ok) || (native->tls == NULL)) { lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); return; } } if (!use_cert) { gnutls_datum_t psk_key = { NULL, 0 }; rc = lrmd__init_remote_key(&psk_key); if (rc != pcmk_rc_ok) { crm_info("Could not connect to Pacemaker Remote at %s:%d: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, pcmk_rc2legacy(rc)); return; } pcmk__tls_add_psk_key(native->tls, &psk_key); gnutls_free(psk_key.data); } native->remote->tls_session = pcmk__new_tls_session(native->tls, sock); if (native->remote->tls_session == NULL) { lrmd_tls_connection_destroy(lrmd); report_async_connection_result(lrmd, -EPROTO); return; } /* If the TLS handshake immediately succeeds or fails, we can handle that * now without having to deal with mainloops and retries. Otherwise, add a * trigger to keep trying until we get a result (or it times out). */ rc = pcmk__tls_client_try_handshake(native->remote, &tls_rc); if (rc == EAGAIN) { struct handshake_data_s *hs = NULL; if (native->handshake_trigger != NULL) { return; } hs = pcmk__assert_alloc(1, sizeof(struct handshake_data_s)); hs->lrmd = lrmd; hs->start_time = time(NULL); hs->timeout_sec = TLS_HANDSHAKE_TIMEOUT; native->handshake_trigger = mainloop_add_trigger(G_PRIORITY_LOW, try_handshake_cb, hs); mainloop_set_trigger(native->handshake_trigger); } else if (rc == pcmk_rc_ok) { tls_handshake_succeeded(lrmd); } else { tls_handshake_failed(lrmd, tls_rc, rc); } } static int lrmd_tls_connect_async(lrmd_t * lrmd, int timeout /*ms */ ) { int rc = pcmk_rc_ok; int timer_id = 0; lrmd_private_t *native = lrmd->lrmd_private; native->sock = -1; rc = pcmk__connect_remote(native->server, native->port, timeout, &timer_id, &(native->sock), lrmd, lrmd_tcp_connect_cb); if (rc != pcmk_rc_ok) { crm_warn("Pacemaker Remote connection to %s:%d failed: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); return rc; } native->async_timer = timer_id; return rc; } static int lrmd_tls_connect(lrmd_t * lrmd, int *fd) { int rc = pcmk_rc_ok; bool use_cert = pcmk__x509_enabled(); lrmd_private_t *native = lrmd->lrmd_private; native->sock = -1; rc = pcmk__connect_remote(native->server, native->port, 0, NULL, &(native->sock), NULL, NULL); if (rc != pcmk_rc_ok) { crm_warn("Pacemaker Remote connection to %s:%d failed: %s " QB_XS " rc=%d", native->server, native->port, pcmk_rc_str(rc), rc); lrmd_tls_connection_destroy(lrmd); return ENOTCONN; } if (native->tls == NULL) { rc = pcmk__init_tls(&native->tls, false, use_cert ? 
GNUTLS_CRD_CERTIFICATE : GNUTLS_CRD_PSK); if (rc != pcmk_rc_ok) { lrmd_tls_connection_destroy(lrmd); return rc; } } if (!use_cert) { gnutls_datum_t psk_key = { NULL, 0 }; rc = lrmd__init_remote_key(&psk_key); if (rc != pcmk_rc_ok) { lrmd_tls_connection_destroy(lrmd); return rc; } pcmk__tls_add_psk_key(native->tls, &psk_key); gnutls_free(psk_key.data); } native->remote->tls_session = pcmk__new_tls_session(native->tls, native->sock); if (native->remote->tls_session == NULL) { lrmd_tls_connection_destroy(lrmd); return EPROTO; } if (tls_client_handshake(lrmd) != pcmk_rc_ok) { return EKEYREJECTED; } crm_info("Client TLS connection established with Pacemaker Remote server %s:%d", native->server, native->port); if (fd) { *fd = native->sock; } else { rc = add_tls_to_mainloop(lrmd, false); } return rc; } static int lrmd_api_connect(lrmd_t * lrmd, const char *name, int *fd) { int rc = -ENOTCONN; lrmd_private_t *native = lrmd->lrmd_private; switch (native->type) { case pcmk__client_ipc: rc = lrmd_ipc_connect(lrmd, fd); break; case pcmk__client_tls: rc = lrmd_tls_connect(lrmd, fd); rc = pcmk_rc2legacy(rc); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } if (rc == pcmk_ok) { rc = lrmd_handshake(lrmd, name); rc = pcmk_rc2legacy(rc); } return rc; } static int lrmd_api_connect_async(lrmd_t * lrmd, const char *name, int timeout) { int rc = pcmk_ok; lrmd_private_t *native = lrmd->lrmd_private; CRM_CHECK(native && native->callback, return -EINVAL); switch (native->type) { case pcmk__client_ipc: /* fake async connection with ipc. it should be fast * enough that we gain very little from async */ rc = lrmd_api_connect(lrmd, name, NULL); if (!rc) { report_async_connection_result(lrmd, rc); } break; case pcmk__client_tls: rc = lrmd_tls_connect_async(lrmd, timeout); rc = pcmk_rc2legacy(rc); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } return rc; } static void lrmd_ipc_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; native->ipc = NULL; } else if (native->ipc) { /* Not attached to mainloop */ crm_ipc_t *ipc = native->ipc; native->ipc = NULL; crm_ipc_close(ipc); crm_ipc_destroy(ipc); } } static void lrmd_tls_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote->tls_session) { gnutls_bye(native->remote->tls_session, GNUTLS_SHUT_RDWR); gnutls_deinit(native->remote->tls_session); native->remote->tls_session = NULL; } if (native->async_timer) { g_source_remove(native->async_timer); native->async_timer = 0; } if (native->source != NULL) { /* Attached to mainloop */ mainloop_del_ipc_client(native->source); native->source = NULL; } else if (native->sock >= 0) { close(native->sock); native->sock = -1; } if (native->pending_notify) { g_list_free_full(native->pending_notify, lrmd_free_xml); native->pending_notify = NULL; } } static int lrmd_api_disconnect(lrmd_t * lrmd) { lrmd_private_t *native = lrmd->lrmd_private; int rc = pcmk_ok; switch (native->type) { case pcmk__client_ipc: crm_debug("Disconnecting from local executor"); lrmd_ipc_disconnect(lrmd); break; case pcmk__client_tls: crm_debug("Disconnecting from remote executor on %s", native->remote_nodename); lrmd_tls_disconnect(lrmd); break; default: crm_err("Unsupported executor connection type (bug?): %d", native->type); rc = -EPROTONOSUPPORT; } 
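    /* The registration token and cached peer version belong to the connection
     * that was just closed; both are obtained again during the handshake on
     * any later connect.
     */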
free(native->token); native->token = NULL; free(native->peer_version); native->peer_version = NULL; return rc; } static int lrmd_api_register_rsc(lrmd_t * lrmd, const char *rsc_id, const char *class, const char *provider, const char *type, enum lrmd_call_options options) { int rc = pcmk_ok; xmlNode *data = NULL; if (!class || !type || !rsc_id) { return -EINVAL; } if (pcmk__is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) && (provider == NULL)) { return -EINVAL; } data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ID, rsc_id); pcmk__xe_set(data, PCMK__XA_LRMD_CLASS, class); pcmk__xe_set(data, PCMK__XA_LRMD_PROVIDER, provider); pcmk__xe_set(data, PCMK__XA_LRMD_TYPE, type); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_REG, data, NULL, 0, options, true); pcmk__xml_free(data); return rc; } static int lrmd_api_unregister_rsc(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ID, rsc_id); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_UNREG, data, NULL, 0, options, true); pcmk__xml_free(data); return rc; } lrmd_rsc_info_t * lrmd_new_rsc_info(const char *rsc_id, const char *standard, const char *provider, const char *type) { lrmd_rsc_info_t *rsc_info = pcmk__assert_alloc(1, sizeof(lrmd_rsc_info_t)); rsc_info->id = pcmk__str_copy(rsc_id); rsc_info->standard = pcmk__str_copy(standard); rsc_info->provider = pcmk__str_copy(provider); rsc_info->type = pcmk__str_copy(type); return rsc_info; } lrmd_rsc_info_t * lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info) { return lrmd_new_rsc_info(rsc_info->id, rsc_info->standard, rsc_info->provider, rsc_info->type); } void lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info) { if (!rsc_info) { return; } free(rsc_info->id); free(rsc_info->type); free(rsc_info->standard); free(rsc_info->provider); free(rsc_info); } static lrmd_rsc_info_t * lrmd_api_get_rsc_info(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options) { lrmd_rsc_info_t *rsc_info = NULL; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); xmlNode *output = NULL; const char *class = NULL; const char *provider = NULL; const char *type = NULL; pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ID, rsc_id); lrmd_send_command(lrmd, LRMD_OP_RSC_INFO, data, &output, 0, options, true); pcmk__xml_free(data); if (!output) { return NULL; } class = pcmk__xe_get(output, PCMK__XA_LRMD_CLASS); provider = pcmk__xe_get(output, PCMK__XA_LRMD_PROVIDER); type = pcmk__xe_get(output, PCMK__XA_LRMD_TYPE); if (!class || !type) { pcmk__xml_free(output); return NULL; } else if (pcmk__is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) && (provider == NULL)) { pcmk__xml_free(output); return NULL; } rsc_info = lrmd_new_rsc_info(rsc_id, class, provider, type); pcmk__xml_free(output); return rsc_info; } void lrmd_free_op_info(lrmd_op_info_t *op_info) { if (op_info) { free(op_info->rsc_id); free(op_info->action); free(op_info->interval_ms_s); free(op_info->timeout_ms_s); free(op_info); } } static int lrmd_api_get_recurring_ops(lrmd_t *lrmd, const char *rsc_id, int timeout_ms, enum lrmd_call_options options, GList **output) { xmlNode *data = NULL; xmlNode *output_xml = NULL; int rc = pcmk_ok; if (output == NULL) { return -EINVAL; } *output = NULL; // Send request if (rsc_id) { data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); 
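        /* A resource ID was given, so scope the request to that resource;
         * otherwise data stays NULL and no resource filter is sent.
         */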
pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ID, rsc_id); } rc = lrmd_send_command(lrmd, LRMD_OP_GET_RECURRING, data, &output_xml, timeout_ms, options, true); if (data) { pcmk__xml_free(data); } // Process reply if ((rc != pcmk_ok) || (output_xml == NULL)) { return rc; } for (const xmlNode *rsc_xml = pcmk__xe_first_child(output_xml, PCMK__XE_LRMD_RSC, NULL, NULL); (rsc_xml != NULL) && (rc == pcmk_ok); rsc_xml = pcmk__xe_next(rsc_xml, PCMK__XE_LRMD_RSC)) { rsc_id = pcmk__xe_get(rsc_xml, PCMK__XA_LRMD_RSC_ID); if (rsc_id == NULL) { crm_err("Could not parse recurring operation information from executor"); continue; } for (const xmlNode *op_xml = pcmk__xe_first_child(rsc_xml, PCMK__XE_LRMD_RSC_OP, NULL, NULL); op_xml != NULL; op_xml = pcmk__xe_next(op_xml, PCMK__XE_LRMD_RSC_OP)) { lrmd_op_info_t *op_info = calloc(1, sizeof(lrmd_op_info_t)); if (op_info == NULL) { rc = -ENOMEM; break; } op_info->rsc_id = strdup(rsc_id); op_info->action = pcmk__xe_get_copy(op_xml, PCMK__XA_LRMD_RSC_ACTION); op_info->interval_ms_s = pcmk__xe_get_copy(op_xml, PCMK__XA_LRMD_RSC_INTERVAL); op_info->timeout_ms_s = pcmk__xe_get_copy(op_xml, PCMK__XA_LRMD_TIMEOUT); *output = g_list_prepend(*output, op_info); } } pcmk__xml_free(output_xml); return rc; } static void lrmd_api_set_callback(lrmd_t * lrmd, lrmd_event_callback callback) { lrmd_private_t *native = lrmd->lrmd_private; native->callback = callback; } void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)) { lrmd_private_t *native = lrmd->lrmd_private; native->proxy_callback = callback; native->proxy_callback_userdata = userdata; } void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg) { lrmd_private_t *native = lrmd->lrmd_private; if (native->proxy_callback) { crm_log_xml_trace(msg, "PROXY_INBOUND"); native->proxy_callback(lrmd, native->proxy_callback_userdata, msg); } } int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg) { if (lrmd == NULL) { return -ENOTCONN; } pcmk__xe_set(msg, PCMK__XA_LRMD_OP, CRM_OP_IPC_FWD); crm_log_xml_trace(msg, "PROXY_OUTBOUND"); return lrmd_send_xml_no_reply(lrmd, msg); } static int stonith_get_metadata(const char *type, char **output) { int rc = pcmk_ok; stonith_t *stonith_api = stonith__api_new(); if (stonith_api == NULL) { crm_err("Could not get fence agent meta-data: API memory allocation failed"); return -ENOMEM; } rc = stonith_api->cmds->metadata(stonith_api, st_opt_sync_call, type, NULL, output, 0); if ((rc == pcmk_ok) && (*output == NULL)) { rc = -EIO; } stonith_api->cmds->free(stonith_api); return rc; } static int lrmd_api_get_metadata(lrmd_t *lrmd, const char *standard, const char *provider, const char *type, char **output, enum lrmd_call_options options) { return lrmd->cmds->get_metadata_params(lrmd, standard, provider, type, output, options, NULL); } static int lrmd_api_get_metadata_params(lrmd_t *lrmd, const char *standard, const char *provider, const char *type, char **output, enum lrmd_call_options options, lrmd_key_value_t *params) { svc_action_t *action = NULL; GHashTable *params_table = NULL; if (!standard || !type) { lrmd_key_value_freeall(params); return -EINVAL; } if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { lrmd_key_value_freeall(params); // stonith-class resources don't support a provider return stonith_get_metadata(type, output); } params_table = pcmk__strkey_table(free, free); for (const lrmd_key_value_t *param = params; param; param = param->next) { 
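        /* Duplicate this caller-supplied parameter into the table handed to
         * services__create_resource_action(); the table owns the copies, and
         * the original lrmd_key_value_t list is freed once the action is set
         * up.
         */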
pcmk__insert_dup(params_table, param->key, param->value); } action = services__create_resource_action(type, standard, provider, type, PCMK_ACTION_META_DATA, 0, PCMK_DEFAULT_ACTION_TIMEOUT_MS, params_table, 0); lrmd_key_value_freeall(params); if (action == NULL) { return -ENOMEM; } if (action->rc != PCMK_OCF_UNKNOWN) { services_action_free(action); return -EINVAL; } if (!services_action_sync(action)) { crm_err("Failed to retrieve meta-data for %s:%s:%s", standard, provider, type); services_action_free(action); return -EIO; } if (!action->stdout_data) { crm_err("Failed to receive meta-data for %s:%s:%s", standard, provider, type); services_action_free(action); return -EIO; } *output = strdup(action->stdout_data); services_action_free(action); return pcmk_ok; } static int lrmd_api_exec(lrmd_t *lrmd, const char *rsc_id, const char *action, const char *userdata, guint interval_ms, int timeout, /* ms */ int start_delay, /* ms */ enum lrmd_call_options options, lrmd_key_value_t * params) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES); lrmd_key_value_t *tmp = NULL; pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ID, rsc_id); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ACTION, action); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_USERDATA_STR, userdata); pcmk__xe_set_guint(data, PCMK__XA_LRMD_RSC_INTERVAL, interval_ms); pcmk__xe_set_int(data, PCMK__XA_LRMD_TIMEOUT, timeout); pcmk__xe_set_int(data, PCMK__XA_LRMD_RSC_START_DELAY, start_delay); for (tmp = params; tmp; tmp = tmp->next) { hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args); } rc = lrmd_send_command(lrmd, LRMD_OP_RSC_EXEC, data, NULL, timeout, options, true); pcmk__xml_free(data); lrmd_key_value_freeall(params); return rc; } /* timeout is in ms */ static int lrmd_api_exec_alert(lrmd_t *lrmd, const char *alert_id, const char *alert_path, int timeout, lrmd_key_value_t *params) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_ALERT); xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES); lrmd_key_value_t *tmp = NULL; pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); pcmk__xe_set(data, PCMK__XA_LRMD_ALERT_ID, alert_id); pcmk__xe_set(data, PCMK__XA_LRMD_ALERT_PATH, alert_path); pcmk__xe_set_int(data, PCMK__XA_LRMD_TIMEOUT, timeout); for (tmp = params; tmp; tmp = tmp->next) { hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args); } rc = lrmd_send_command(lrmd, LRMD_OP_ALERT_EXEC, data, NULL, timeout, lrmd_opt_notify_orig_only, true); pcmk__xml_free(data); lrmd_key_value_freeall(params); return rc; } static int lrmd_api_cancel(lrmd_t *lrmd, const char *rsc_id, const char *action, guint interval_ms) { int rc = pcmk_ok; xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC); pcmk__xe_set(data, PCMK__XA_LRMD_ORIGIN, __func__); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ACTION, action); pcmk__xe_set(data, PCMK__XA_LRMD_RSC_ID, rsc_id); pcmk__xe_set_guint(data, PCMK__XA_LRMD_RSC_INTERVAL, interval_ms); rc = lrmd_send_command(lrmd, LRMD_OP_RSC_CANCEL, data, NULL, 0, 0, true); pcmk__xml_free(data); return rc; } static int list_stonith_agents(lrmd_list_t ** resources) { int rc = 0; stonith_t *stonith_api = stonith__api_new(); stonith_key_value_t *stonith_resources = NULL; stonith_key_value_t *dIter = NULL; if (stonith_api == NULL) { crm_err("Could not list fence agents: API memory allocation failed"); return -ENOMEM; } stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL, 
&stonith_resources, 0); stonith_api->cmds->free(stonith_api); for (dIter = stonith_resources; dIter; dIter = dIter->next) { rc++; if (resources) { *resources = lrmd_list_add(*resources, dIter->value); } } stonith__key_value_freeall(stonith_resources, true, false); return rc; } static int lrmd_api_list_agents(lrmd_t * lrmd, lrmd_list_t ** resources, const char *class, const char *provider) { int rc = 0; int stonith_count = 0; // Initially, whether to include stonith devices if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) { stonith_count = 1; } else { GList *gIter = NULL; GList *agents = resources_list_agents(class, provider); for (gIter = agents; gIter != NULL; gIter = gIter->next) { *resources = lrmd_list_add(*resources, (const char *)gIter->data); rc++; } g_list_free_full(agents, free); if (!class) { stonith_count = 1; } } if (stonith_count) { // Now, if stonith devices are included, how many there are stonith_count = list_stonith_agents(resources); if (stonith_count > 0) { rc += stonith_count; } } if (rc == 0) { crm_notice("No agents found for class %s", class); rc = -EPROTONOSUPPORT; } return rc; } static bool does_provider_have_agent(const char *agent, const char *provider, const char *class) { bool found = false; GList *agents = NULL; GList *gIter2 = NULL; agents = resources_list_agents(class, provider); for (gIter2 = agents; gIter2 != NULL; gIter2 = gIter2->next) { if (pcmk__str_eq(agent, gIter2->data, pcmk__str_casei)) { found = true; } } g_list_free_full(agents, free); return found; } static int lrmd_api_list_ocf_providers(lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers) { int rc = pcmk_ok; char *provider = NULL; GList *ocf_providers = NULL; GList *gIter = NULL; ocf_providers = resources_list_providers(PCMK_RESOURCE_CLASS_OCF); for (gIter = ocf_providers; gIter != NULL; gIter = gIter->next) { provider = gIter->data; if (!agent || does_provider_have_agent(agent, provider, PCMK_RESOURCE_CLASS_OCF)) { *providers = lrmd_list_add(*providers, (const char *)gIter->data); rc++; } } g_list_free_full(ocf_providers, free); return rc; } static int lrmd_api_list_standards(lrmd_t * lrmd, lrmd_list_t ** supported) { int rc = 0; GList *standards = NULL; GList *gIter = NULL; standards = resources_list_standards(); for (gIter = standards; gIter != NULL; gIter = gIter->next) { *supported = lrmd_list_add(*supported, (const char *)gIter->data); rc++; } if (list_stonith_agents(NULL) > 0) { *supported = lrmd_list_add(*supported, PCMK_RESOURCE_CLASS_STONITH); rc++; } g_list_free_full(standards, free); return rc; } /*! * \internal * \brief Create an executor API object * * \param[out] api Will be set to newly created API object (it is the * caller's responsibility to free this value with * lrmd_api_delete() if this function succeeds) * \param[in] nodename If the object will be used for a remote connection, * the node name to use in cluster for remote executor * \param[in] server If the object will be used for a remote connection, * the resolvable host name to connect to * \param[in] port If the object will be used for a remote connection, * port number on \p server to connect to * * \return Standard Pacemaker return code * \note If the caller leaves one of \p nodename or \p server NULL, the other's * value will be used for both. If the caller leaves both NULL, an API * object will be created for a local executor connection. 
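 *
 * Minimal illustrative sketch of creating and releasing a local (IPC)
 * connection object:
 * \code
 * lrmd_t *api = NULL;
 *
 * if (lrmd__new(&api, NULL, NULL, 0) == pcmk_rc_ok) {
 *     // ... use api->cmds ...
 *     lrmd_api_delete(api);
 * }
 * \endcode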
*/ int lrmd__new(lrmd_t **api, const char *nodename, const char *server, int port) { lrmd_private_t *pvt = NULL; if (api == NULL) { return EINVAL; } *api = NULL; // Allocate all memory needed *api = calloc(1, sizeof(lrmd_t)); if (*api == NULL) { return ENOMEM; } pvt = calloc(1, sizeof(lrmd_private_t)); if (pvt == NULL) { lrmd_api_delete(*api); *api = NULL; return ENOMEM; } (*api)->lrmd_private = pvt; // @TODO Do we need to do this for local connections? pvt->remote = calloc(1, sizeof(pcmk__remote_t)); (*api)->cmds = calloc(1, sizeof(lrmd_api_operations_t)); if ((pvt->remote == NULL) || ((*api)->cmds == NULL)) { lrmd_api_delete(*api); *api = NULL; return ENOMEM; } // Set methods (*api)->cmds->connect = lrmd_api_connect; (*api)->cmds->connect_async = lrmd_api_connect_async; (*api)->cmds->is_connected = lrmd_api_is_connected; (*api)->cmds->poke_connection = lrmd_api_poke_connection; (*api)->cmds->disconnect = lrmd_api_disconnect; (*api)->cmds->register_rsc = lrmd_api_register_rsc; (*api)->cmds->unregister_rsc = lrmd_api_unregister_rsc; (*api)->cmds->get_rsc_info = lrmd_api_get_rsc_info; (*api)->cmds->get_recurring_ops = lrmd_api_get_recurring_ops; (*api)->cmds->set_callback = lrmd_api_set_callback; (*api)->cmds->get_metadata = lrmd_api_get_metadata; (*api)->cmds->exec = lrmd_api_exec; (*api)->cmds->cancel = lrmd_api_cancel; (*api)->cmds->list_agents = lrmd_api_list_agents; (*api)->cmds->list_ocf_providers = lrmd_api_list_ocf_providers; (*api)->cmds->list_standards = lrmd_api_list_standards; (*api)->cmds->exec_alert = lrmd_api_exec_alert; (*api)->cmds->get_metadata_params = lrmd_api_get_metadata_params; if ((nodename == NULL) && (server == NULL)) { pvt->type = pcmk__client_ipc; } else { if (nodename == NULL) { nodename = server; } else if (server == NULL) { server = nodename; } pvt->type = pcmk__client_tls; pvt->remote_nodename = strdup(nodename); pvt->server = strdup(server); if ((pvt->remote_nodename == NULL) || (pvt->server == NULL)) { lrmd_api_delete(*api); *api = NULL; return ENOMEM; } pvt->port = port; if (pvt->port == 0) { pvt->port = crm_default_remote_port(); } } return pcmk_rc_ok; } lrmd_t * lrmd_api_new(void) { lrmd_t *api = NULL; pcmk__assert(lrmd__new(&api, NULL, NULL, 0) == pcmk_rc_ok); return api; } lrmd_t * lrmd_remote_api_new(const char *nodename, const char *server, int port) { lrmd_t *api = NULL; pcmk__assert(lrmd__new(&api, nodename, server, port) == pcmk_rc_ok); return api; } void lrmd_api_delete(lrmd_t * lrmd) { if (lrmd == NULL) { return; } if (lrmd->cmds != NULL) { // Never NULL, but make static analysis happy if (lrmd->cmds->disconnect != NULL) { // Also never really NULL lrmd->cmds->disconnect(lrmd); // No-op if already disconnected } free(lrmd->cmds); } if (lrmd->lrmd_private != NULL) { lrmd_private_t *native = lrmd->lrmd_private; free(native->server); free(native->remote_nodename); free(native->remote); free(native->token); free(native->peer_version); free(lrmd->lrmd_private); } free(lrmd); } struct metadata_cb { void (*callback)(int pid, const pcmk__action_result_t *result, void *user_data); void *user_data; }; /*! 
* \internal * \brief Process asynchronous metadata completion * * \param[in,out] action Metadata action that completed */ static void metadata_complete(svc_action_t *action) { struct metadata_cb *metadata_cb = (struct metadata_cb *) action->cb_data; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; services__copy_result(action, &result); pcmk__set_result_output(&result, action->stdout_data, action->stderr_data); metadata_cb->callback(0, &result, metadata_cb->user_data); result.action_stdout = NULL; // Prevent free, because action owns it result.action_stderr = NULL; // Prevent free, because action owns it pcmk__reset_result(&result); free(metadata_cb); } /*! * \internal * \brief Retrieve agent metadata asynchronously * * \param[in] rsc Resource agent specification * \param[in] callback Function to call with result (this will always be * called, whether by this function directly or later * via the main loop, and on success the metadata will * be in its result argument's action_stdout) * \param[in,out] user_data User data to pass to callback * * \return Standard Pacemaker return code * \note This function is not a lrmd_api_operations_t method because it does not * need an lrmd_t object and does not go through the executor, but * executes the agent directly. */ int lrmd__metadata_async(const lrmd_rsc_info_t *rsc, void (*callback)(int pid, const pcmk__action_result_t *result, void *user_data), void *user_data) { svc_action_t *action = NULL; struct metadata_cb *metadata_cb = NULL; pcmk__action_result_t result = PCMK__UNKNOWN_RESULT; CRM_CHECK(callback != NULL, return EINVAL); if ((rsc == NULL) || (rsc->standard == NULL) || (rsc->type == NULL)) { pcmk__set_result(&result, PCMK_OCF_NOT_CONFIGURED, PCMK_EXEC_ERROR_FATAL, "Invalid resource specification"); callback(0, &result, user_data); pcmk__reset_result(&result); return EINVAL; } if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_STONITH) == 0) { return stonith__metadata_async(rsc->type, pcmk__timeout_ms2s(PCMK_DEFAULT_ACTION_TIMEOUT_MS), callback, user_data); } action = services__create_resource_action(pcmk__s(rsc->id, rsc->type), rsc->standard, rsc->provider, rsc->type, PCMK_ACTION_META_DATA, 0, PCMK_DEFAULT_ACTION_TIMEOUT_MS, NULL, 0); if (action == NULL) { pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Out of memory"); callback(0, &result, user_data); pcmk__reset_result(&result); return ENOMEM; } if (action->rc != PCMK_OCF_UNKNOWN) { services__copy_result(action, &result); callback(0, &result, user_data); pcmk__reset_result(&result); services_action_free(action); return EINVAL; } action->cb_data = calloc(1, sizeof(struct metadata_cb)); if (action->cb_data == NULL) { services_action_free(action); pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR, "Out of memory"); callback(0, &result, user_data); pcmk__reset_result(&result); return ENOMEM; } metadata_cb = (struct metadata_cb *) action->cb_data; metadata_cb->callback = callback; metadata_cb->user_data = user_data; if (!services_action_async(action, metadata_complete)) { services_action_free(action); return pcmk_rc_error; // @TODO Derive from action->rc and ->status } // The services library has taken responsibility for action return pcmk_rc_ok; } /*! 
* \internal * \brief Set the result of an executor event * * \param[in,out] event Executor event to set * \param[in] rc OCF exit status of event * \param[in] op_status Executor status of event * \param[in] exit_reason Human-friendly description of event */ void lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc, int op_status, const char *exit_reason) { if (event == NULL) { return; } event->rc = rc; event->op_status = op_status; // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees pcmk__str_update((char **) &event->exit_reason, exit_reason); } /*! * \internal * \brief Clear an executor event's exit reason, output, and error output * * \param[in,out] event Executor event to reset */ void lrmd__reset_result(lrmd_event_data_t *event) { if (event == NULL) { return; } free((void *) event->exit_reason); event->exit_reason = NULL; free((void *) event->output); event->output = NULL; } /*! * \internal * \brief Get the uptime of a remote resource connection * * When the cluster connects to a remote resource, part of that resource's * handshake includes the uptime of the remote resource's connection. This * uptime is stored in the lrmd_t object. * * \return The connection's uptime, or -1 if unknown */ time_t lrmd__uptime(lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote == NULL) { return -1; } else { return native->remote->uptime; } } const char * lrmd__node_start_state(lrmd_t *lrmd) { lrmd_private_t *native = lrmd->lrmd_private; if (native->remote == NULL) { return NULL; } else { return native->remote->start_state; } } diff --git a/lib/pacemaker/pcmk_injections.c b/lib/pacemaker/pcmk_injections.c index 019421d104..498c06af0c 100644 --- a/lib/pacemaker/pcmk_injections.c +++ b/lib/pacemaker/pcmk_injections.c @@ -1,799 +1,799 @@ /* * Copyright 2009-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include // xmlNode #include #include #include #include #include #include #include // lrmd_event_data_t, etc. #include #include #include #include "libpacemaker_private.h" // @TODO Replace this with a new scheduler flag bool pcmk__simulate_node_config = false; #define XPATH_NODE_CONFIG "//" PCMK_XE_NODE "[@" PCMK_XA_UNAME "='%s']" #define XPATH_NODE_STATE "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" #define XPATH_NODE_STATE_BY_ID "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_ID "='%s']" #define XPATH_RSC_HISTORY XPATH_NODE_STATE \ "//" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']" /*! 
* \internal * \brief Inject a fictitious transient node attribute into scheduler input * * \param[in,out] out Output object for displaying error messages * \param[in,out] cib_node \c PCMK__XE_NODE_STATE XML to inject attribute into * \param[in] name Transient node attribute name to inject * \param[in] value Transient node attribute value to inject */ static void inject_transient_attr(pcmk__output_t *out, xmlNode *cib_node, const char *name, const char *value) { xmlNode *attrs = NULL; xmlNode *instance_attrs = NULL; const char *node_uuid = pcmk__xe_id(cib_node); out->message(out, "inject-attr", name, value, cib_node); attrs = pcmk__xe_first_child(cib_node, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL, NULL); if (attrs == NULL) { attrs = pcmk__xe_create(cib_node, PCMK__XE_TRANSIENT_ATTRIBUTES); pcmk__xe_set(attrs, PCMK_XA_ID, node_uuid); } instance_attrs = pcmk__xe_first_child(attrs, PCMK_XE_INSTANCE_ATTRIBUTES, NULL, NULL); if (instance_attrs == NULL) { instance_attrs = pcmk__xe_create(attrs, PCMK_XE_INSTANCE_ATTRIBUTES); pcmk__xe_set(instance_attrs, PCMK_XA_ID, node_uuid); } crm_create_nvpair_xml(instance_attrs, NULL, name, value); } /*! * \internal * \brief Inject a fictitious fail count into a scheduler input * * \param[in,out] out Output object for displaying error messages * \param[in,out] cib_conn CIB connection * \param[in,out] cib_node Node state XML to inject into * \param[in] resource ID of resource for fail count to inject * \param[in] task Action name for fail count to inject * \param[in] interval_ms Action interval (in milliseconds) for fail count * \param[in] exit_status Action result for fail count to inject (if * \c PCMK_OCF_OK, or \c PCMK_OCF_NOT_RUNNING when * \p interval_ms is 0, inject nothing) * \param[in] infinity If true, set fail count to "INFINITY", otherwise * increase it by 1 */ void pcmk__inject_failcount(pcmk__output_t *out, cib_t *cib_conn, xmlNode *cib_node, const char *resource, const char *task, guint interval_ms, int exit_status, bool infinity) { char *name = NULL; char *value = NULL; int failcount = 0; xmlNode *output = NULL; CRM_CHECK((out != NULL) && (cib_conn != NULL) && (cib_node != NULL) && (resource != NULL) && (task != NULL), return); if ((exit_status == PCMK_OCF_OK) || ((exit_status == PCMK_OCF_NOT_RUNNING) && (interval_ms == 0))) { return; } // Get current failcount and increment it name = pcmk__failcount_name(resource, task, interval_ms); if (cib__get_node_attrs(out, cib_conn, PCMK_XE_STATUS, pcmk__xe_id(cib_node), NULL, NULL, NULL, name, NULL, &output) == pcmk_rc_ok) { if (pcmk__xe_get_int(output, PCMK_XA_VALUE, &failcount) != pcmk_rc_ok) { failcount = 0; } } if (infinity) { value = pcmk__str_copy(PCMK_VALUE_INFINITY); } else { value = pcmk__itoa(failcount + 1); } inject_transient_attr(out, cib_node, name, value); free(name); free(value); pcmk__xml_free(output); name = pcmk__lastfailure_name(resource, task, interval_ms); value = pcmk__ttoa(time(NULL)); inject_transient_attr(out, cib_node, name, value); free(name); free(value); } /*! 
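 * For the fail-count injection above, pcmk__failcount_name() and
 * pcmk__lastfailure_name() generate transient attribute names that are
 * typically of the form shown below (hypothetical example for a
 * 10-second-interval monitor of a resource named "myrsc"):
 *
 *   fail-count-myrsc#monitor_10000
 *   last-failure-myrsc#monitor_10000
 *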
* \internal * \brief Create a CIB configuration entry for a fictitious node * * \param[in,out] cib_conn CIB object to use * \param[in] node Node name to use */ static void create_node_entry(cib_t *cib_conn, const char *node) { int rc = pcmk_ok; char *xpath = pcmk__assert_asprintf(XPATH_NODE_CONFIG, node); rc = cib_conn->cmds->query(cib_conn, xpath, NULL, cib_xpath|cib_sync_call); if (rc == -ENXIO) { // Only add if not already existing xmlNode *cib_object = pcmk__xe_create(NULL, PCMK_XE_NODE); pcmk__xe_set(cib_object, PCMK_XA_ID, node); // Use node name as ID pcmk__xe_set(cib_object, PCMK_XA_UNAME, node); cib_conn->cmds->create(cib_conn, PCMK_XE_NODES, cib_object, cib_sync_call); /* Not bothering with subsequent query to see if it exists, we'll bomb out later in the call to query_node_uuid()... */ pcmk__xml_free(cib_object); } free(xpath); } /*! * \internal * \brief Synthesize a fake executor event for an action * * \param[in] cib_resource XML for any existing resource action history * \param[in] task Name of action to synthesize * \param[in] interval_ms Interval of action to synthesize * \param[in] outcome Result of action to synthesize * * \return Newly allocated executor event * \note It is the caller's responsibility to free the result with * lrmd_free_event(). */ static lrmd_event_data_t * create_op(const xmlNode *cib_resource, const char *task, guint interval_ms, int outcome) { lrmd_event_data_t *op = NULL; xmlNode *xop = NULL; op = lrmd_new_event(pcmk__xe_id(cib_resource), task, interval_ms); lrmd__set_result(op, outcome, PCMK_EXEC_DONE, "Simulated action result"); op->params = NULL; // Not needed for simulation purposes op->t_run = time(NULL); op->t_rcchange = op->t_run; // Use a call ID higher than any existing history entries op->call_id = 0; for (xop = pcmk__xe_first_child(cib_resource, NULL, NULL, NULL); xop != NULL; xop = pcmk__xe_next(xop, NULL)) { int tmp = 0; pcmk__xe_get_int(xop, PCMK__XA_CALL_ID, &tmp); if (tmp > op->call_id) { op->call_id = tmp; } } op->call_id++; return op; } /*! * \internal * \brief Inject a fictitious resource history entry into a scheduler input * * \param[in,out] cib_resource Resource history XML to inject entry into * \param[in,out] op Action result to inject * \param[in] node Name of node where the action occurred * \param[in] target_rc Expected result for action to inject * * \return XML of injected resource history entry */ xmlNode * pcmk__inject_action_result(xmlNode *cib_resource, lrmd_event_data_t *op, const char *node, int target_rc) { return pcmk__create_history_xml(cib_resource, op, CRM_FEATURE_SET, target_rc, node, crm_system_name); } /*! * \internal * \brief Inject a fictitious node into a scheduler input * * \param[in,out] cib_conn Scheduler input CIB to inject node into * \param[in] node Name of node to inject * \param[in] uuid UUID of node to inject * * \return XML of \c PCMK__XE_NODE_STATE entry for new node * \note If the global pcmk__simulate_node_config has been set to true, a * node entry in the configuration section will be added, as well as a * node state entry in the status section. 
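 *
 * A minimal usage sketch (the node name is illustrative; this mirrors how
 * pcmk__inject_node_state_change() below uses the function):
 *
 *   xmlNode *cib_node = pcmk__inject_node(cib_conn, "node1", NULL);
 *
 *   pcmk__xe_set(cib_node, PCMK_XA_CRMD, PCMK_VALUE_ONLINE);
 *   cib_conn->cmds->modify(cib_conn, PCMK_XE_STATUS, cib_node,
 *                          cib_sync_call);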
*/ xmlNode * pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid) { int rc = pcmk_ok; xmlNode *cib_object = NULL; char *xpath = pcmk__assert_asprintf(XPATH_NODE_STATE, node); bool duplicate = false; char *found_uuid = NULL; if (pcmk__simulate_node_config) { create_node_entry(cib_conn, node); } rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object, cib_xpath|cib_sync_call); if ((cib_object != NULL) && (pcmk__xe_id(cib_object) == NULL)) { crm_err("Detected multiple " PCMK__XE_NODE_STATE " entries for " "xpath=%s, bailing", xpath); duplicate = true; goto done; } if (rc == -ENXIO) { if (uuid == NULL) { query_node_uuid(cib_conn, node, &found_uuid, NULL); } else { found_uuid = strdup(uuid); } if (found_uuid) { char *xpath_by_uuid = pcmk__assert_asprintf(XPATH_NODE_STATE_BY_ID, found_uuid); /* It's possible that a PCMK__XE_NODE_STATE entry doesn't have a * PCMK_XA_UNAME yet */ rc = cib_conn->cmds->query(cib_conn, xpath_by_uuid, &cib_object, cib_xpath|cib_sync_call); if ((cib_object != NULL) && (pcmk__xe_id(cib_object) == NULL)) { crm_err("Can't inject node state for %s because multiple " "state entries found for ID %s", node, found_uuid); duplicate = true; free(xpath_by_uuid); goto done; } else if (cib_object != NULL) { pcmk__xe_set(cib_object, PCMK_XA_UNAME, node); rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_STATUS, cib_object, cib_sync_call); } free(xpath_by_uuid); } } if (rc == -ENXIO) { cib_object = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE); pcmk__xe_set(cib_object, PCMK_XA_ID, found_uuid); pcmk__xe_set(cib_object, PCMK_XA_UNAME, node); cib_conn->cmds->create(cib_conn, PCMK_XE_STATUS, cib_object, cib_sync_call); pcmk__xml_free(cib_object); rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object, cib_xpath|cib_sync_call); crm_trace("Injecting node state for %s (rc=%d)", node, rc); } done: free(found_uuid); free(xpath); if (duplicate) { crm_log_xml_warn(cib_object, "Duplicates"); crm_exit(CRM_EX_SOFTWARE); return NULL; // not reached, but makes static analysis happy } pcmk__assert(rc == pcmk_ok); return cib_object; } /*! * \internal * \brief Inject a fictitious node state change into a scheduler input * * \param[in,out] cib_conn Scheduler input CIB to inject into * \param[in] node Name of node to inject change for * \param[in] up If true, change state to online, otherwise offline * * \return XML of changed (or added) node state entry */ xmlNode * pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up) { xmlNode *cib_node = pcmk__inject_node(cib_conn, node, NULL); if (up) { pcmk__xe_set_props(cib_node, PCMK__XA_IN_CCM, PCMK_VALUE_TRUE, PCMK_XA_CRMD, PCMK_VALUE_ONLINE, PCMK__XA_JOIN, CRMD_JOINSTATE_MEMBER, PCMK_XA_EXPECTED, CRMD_JOINSTATE_MEMBER, NULL); } else { pcmk__xe_set_props(cib_node, PCMK__XA_IN_CCM, PCMK_VALUE_FALSE, PCMK_XA_CRMD, PCMK_VALUE_OFFLINE, PCMK__XA_JOIN, CRMD_JOINSTATE_DOWN, PCMK_XA_EXPECTED, CRMD_JOINSTATE_DOWN, NULL); } pcmk__xe_set(cib_node, PCMK_XA_CRM_DEBUG_ORIGIN, crm_system_name); return cib_node; } /*! 
* \internal * \brief Check whether a node has history for a given resource * * \param[in,out] cib_node Node state XML to check * \param[in] resource Resource name to check for * * \return Resource's \c PCMK__XE_LRM_RESOURCE XML entry beneath \p cib_node if * found, otherwise \c NULL */ static xmlNode * find_resource_xml(xmlNode *cib_node, const char *resource) { const char *node = pcmk__xe_get(cib_node, PCMK_XA_UNAME); char *xpath = pcmk__assert_asprintf(XPATH_RSC_HISTORY, node, resource); xmlNode *match = pcmk__xpath_find_one(cib_node->doc, xpath, LOG_TRACE); free(xpath); return match; } /*! * \internal * \brief Inject a resource history element into a scheduler input * * \param[in,out] out Output object for displaying error messages * \param[in,out] cib_node Node state XML to inject resource history entry into * \param[in] resource ID (in configuration) of resource to inject * \param[in] lrm_name ID as used in history (could be clone instance) * \param[in] rclass Resource agent class of resource to inject * \param[in] rtype Resource agent type of resource to inject * \param[in] rprovider Resource agent provider of resource to inject * * \return XML of injected resource history element * \note If a history element already exists under either \p resource or * \p lrm_name, this will return it rather than injecting a new one. */ xmlNode * pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node, const char *resource, const char *lrm_name, const char *rclass, const char *rtype, const char *rprovider) { xmlNode *lrm = NULL; xmlNode *container = NULL; xmlNode *cib_resource = NULL; cib_resource = find_resource_xml(cib_node, resource); if (cib_resource != NULL) { /* If an existing LRM history entry uses the resource name, * continue using it, even if lrm_name is different. 
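 * (For example, lrm_name might be a clone instance ID such as "myrsc:0"
 * while history already exists under the plain resource name "myrsc"; the
 * names here are illustrative only.)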
*/ return cib_resource; } // Check for history entry under preferred name if (strcmp(resource, lrm_name) != 0) { cib_resource = find_resource_xml(cib_node, lrm_name); if (cib_resource != NULL) { return cib_resource; } } if ((rclass == NULL) || (rtype == NULL)) { // @TODO query configuration for class, provider, type out->err(out, "Resource %s not found in the status section of %s " "(supply class and type to continue)", resource, pcmk__xe_id(cib_node)); return NULL; } else if (!pcmk__strcase_any_of(rclass, PCMK_RESOURCE_CLASS_OCF, PCMK_RESOURCE_CLASS_STONITH, PCMK_RESOURCE_CLASS_SERVICE, PCMK_RESOURCE_CLASS_SYSTEMD, PCMK_RESOURCE_CLASS_LSB, NULL)) { out->err(out, "Invalid class for %s: %s", resource, rclass); return NULL; } else if (pcmk__is_set(pcmk_get_ra_caps(rclass), pcmk_ra_cap_provider) && (rprovider == NULL)) { // @TODO query configuration for provider out->err(out, "Please specify the provider for resource %s", resource); return NULL; } crm_info("Injecting new resource %s into node state '%s'", lrm_name, pcmk__xe_id(cib_node)); lrm = pcmk__xe_first_child(cib_node, PCMK__XE_LRM, NULL, NULL); if (lrm == NULL) { const char *node_uuid = pcmk__xe_id(cib_node); lrm = pcmk__xe_create(cib_node, PCMK__XE_LRM); pcmk__xe_set(lrm, PCMK_XA_ID, node_uuid); } container = pcmk__xe_first_child(lrm, PCMK__XE_LRM_RESOURCES, NULL, NULL); if (container == NULL) { container = pcmk__xe_create(lrm, PCMK__XE_LRM_RESOURCES); } cib_resource = pcmk__xe_create(container, PCMK__XE_LRM_RESOURCE); // If we're creating a new entry, use the preferred name pcmk__xe_set(cib_resource, PCMK_XA_ID, lrm_name); pcmk__xe_set(cib_resource, PCMK_XA_CLASS, rclass); pcmk__xe_set(cib_resource, PCMK_XA_PROVIDER, rprovider); pcmk__xe_set(cib_resource, PCMK_XA_TYPE, rtype); return cib_resource; } /*! * \internal * \brief Inject a ticket attribute into ticket state * * \param[in,out] out Output object for displaying error messages * \param[in] ticket_id Ticket whose state should be changed * \param[in] attr_name Ticket attribute name to inject * \param[in] attr_value Boolean value of ticket attribute to inject * \param[in,out] cib CIB object to use * * \return Standard Pacemaker return code */ static int set_ticket_state_attr(pcmk__output_t *out, const char *ticket_id, const char *attr_name, bool attr_value, cib_t *cib) { int rc = pcmk_rc_ok; xmlNode *xml_top = NULL; xmlNode *ticket_state_xml = NULL; // Check for an existing ticket state entry rc = pcmk__get_ticket_state(cib, ticket_id, &ticket_state_xml); if (rc == pcmk_rc_duplicate_id) { out->err(out, "Multiple " PCMK__XE_TICKET_STATE "s match ticket_id=%s", ticket_id); rc = pcmk_rc_ok; } if (rc == pcmk_rc_ok) { // Ticket state found, use it crm_debug("Injecting attribute into existing ticket state %s", ticket_id); xml_top = ticket_state_xml; } else if (rc == ENXIO) { // No ticket state, create it xmlNode *xml_obj = NULL; xml_top = pcmk__xe_create(NULL, PCMK_XE_STATUS); xml_obj = pcmk__xe_create(xml_top, PCMK_XE_TICKETS); ticket_state_xml = pcmk__xe_create(xml_obj, PCMK__XE_TICKET_STATE); pcmk__xe_set(ticket_state_xml, PCMK_XA_ID, ticket_id); } else { // Error return rc; } // Add the attribute to the ticket state - pcmk__xe_set_bool_attr(ticket_state_xml, attr_name, attr_value); + pcmk__xe_set_bool(ticket_state_xml, attr_name, attr_value); crm_log_xml_debug(xml_top, "Update"); // Commit the change to the CIB rc = cib->cmds->modify(cib, PCMK_XE_STATUS, xml_top, cib_sync_call); rc = pcmk_legacy2rc(rc); pcmk__xml_free(xml_top); return rc; } /*! 
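 * The action specification parsed by the function below has the form
 * "<operation-key>@<node>=<exit-status>". A hypothetical example:
 * "myrsc_monitor_10000@node1=7" records a result of 7 (not running) for a
 * 10-second-interval monitor of myrsc on node1.
 *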
* \internal * \brief Inject a fictitious action into the cluster * * \param[in,out] out Output object for displaying error messages * \param[in] spec Action specification to inject * \param[in,out] cib CIB object for scheduler input * \param[in] scheduler Scheduler data */ static void inject_action(pcmk__output_t *out, const char *spec, cib_t *cib, const pcmk_scheduler_t *scheduler) { int rc; int outcome = PCMK_OCF_OK; guint interval_ms = 0; char *key = NULL; char *node = NULL; char *task = NULL; char *resource = NULL; const char *rtype = NULL; const char *rclass = NULL; const char *rprovider = NULL; xmlNode *cib_op = NULL; xmlNode *cib_node = NULL; xmlNode *cib_resource = NULL; const pcmk_resource_t *rsc = NULL; lrmd_event_data_t *op = NULL; bool infinity = false; out->message(out, "inject-spec", spec); key = pcmk__assert_alloc(1, strlen(spec) + 1); node = pcmk__assert_alloc(1, strlen(spec) + 1); rc = sscanf(spec, "%[^@]@%[^=]=%d", key, node, &outcome); if (rc != 3) { out->err(out, "Invalid operation spec: %s. Only found %d fields", spec, rc); goto done; } parse_op_key(key, &resource, &task, &interval_ms); rsc = pe_find_resource(scheduler->priv->resources, resource); if (rsc == NULL) { out->err(out, "Invalid resource name: %s", resource); goto done; } rclass = pcmk__xe_get(rsc->priv->xml, PCMK_XA_CLASS); rtype = pcmk__xe_get(rsc->priv->xml, PCMK_XA_TYPE); rprovider = pcmk__xe_get(rsc->priv->xml, PCMK_XA_PROVIDER); cib_node = pcmk__inject_node(cib, node, NULL); pcmk__assert(cib_node != NULL); if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_none)) { infinity = true; } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_none) && pcmk__is_set(scheduler->flags, pcmk__sched_start_failure_fatal)) { infinity = true; } pcmk__inject_failcount(out, cib, cib_node, resource, task, interval_ms, outcome, infinity); cib_resource = pcmk__inject_resource_history(out, cib_node, resource, resource, rclass, rtype, rprovider); pcmk__assert(cib_resource != NULL); op = create_op(cib_resource, task, interval_ms, outcome); pcmk__assert(op != NULL); cib_op = pcmk__inject_action_result(cib_resource, op, node, 0); pcmk__assert(cib_op != NULL); lrmd_free_event(op); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); pcmk__assert(rc == pcmk_ok); done: free(task); free(node); free(key); } /*! 
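 * A minimal usage sketch for the function below (hypothetical values; in
 * practice the injections structure is built from simulation options):
 *
 *   pcmk_injections_t *injections =
 *       pcmk__assert_alloc(1, sizeof(pcmk_injections_t));
 *
 *   injections->node_up = g_list_append(injections->node_up,
 *                                       g_strdup("node1"));
 *   injections->quorum = strdup("1");
 *   pcmk__inject_scheduler_input(scheduler, cib, injections);
 *   pcmk_free_injections(injections);
 *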
* \internal * \brief Inject fictitious scheduler inputs * * \param[in,out] scheduler Scheduler data * \param[in,out] cib CIB object for scheduler input to modify * \param[in] injections Injections to apply */ void pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib, const pcmk_injections_t *injections) { int rc = pcmk_ok; const GList *iter = NULL; xmlNode *cib_node = NULL; pcmk__output_t *out = scheduler->priv->out; out->message(out, "inject-modify-config", injections->quorum, injections->watchdog); if (injections->quorum != NULL) { xmlNode *top = pcmk__xe_create(NULL, PCMK_XE_CIB); /* pcmk__xe_set(top, PCMK_XA_DC_UUID, dc_uuid); */ pcmk__xe_set(top, PCMK_XA_HAVE_QUORUM, injections->quorum); rc = cib->cmds->modify(cib, NULL, top, cib_sync_call); pcmk__assert(rc == pcmk_ok); } if (injections->watchdog != NULL) { rc = cib__update_node_attr(out, cib, cib_sync_call, PCMK_XE_CRM_CONFIG, NULL, NULL, NULL, NULL, PCMK_OPT_HAVE_WATCHDOG, injections->watchdog, NULL, NULL); pcmk__assert(rc == pcmk_rc_ok); } for (iter = injections->node_up; iter != NULL; iter = iter->next) { const char *node = (const char *) iter->data; out->message(out, "inject-modify-node", "Online", node); cib_node = pcmk__inject_node_state_change(cib, node, true); pcmk__assert(cib_node != NULL); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); pcmk__assert(rc == pcmk_ok); pcmk__xml_free(cib_node); } for (iter = injections->node_down; iter != NULL; iter = iter->next) { const char *node = (const char *) iter->data; char *xpath = NULL; out->message(out, "inject-modify-node", "Offline", node); cib_node = pcmk__inject_node_state_change(cib, node, false); pcmk__assert(cib_node != NULL); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); pcmk__assert(rc == pcmk_ok); pcmk__xml_free(cib_node); xpath = pcmk__assert_asprintf("//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" "/" PCMK__XE_LRM, node); cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_sync_call); free(xpath); xpath = pcmk__assert_asprintf("//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" "/" PCMK__XE_TRANSIENT_ATTRIBUTES, node); cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_sync_call); free(xpath); } for (iter = injections->node_fail; iter != NULL; iter = iter->next) { const char *node = (const char *) iter->data; out->message(out, "inject-modify-node", "Failing", node); cib_node = pcmk__inject_node_state_change(cib, node, true); pcmk__xe_set(cib_node, PCMK__XA_IN_CCM, PCMK_VALUE_FALSE); pcmk__assert(cib_node != NULL); rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call); pcmk__assert(rc == pcmk_ok); pcmk__xml_free(cib_node); } for (iter = injections->ticket_grant; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Granting", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK__XA_GRANTED, true, cib); pcmk__assert(rc == pcmk_rc_ok); } for (iter = injections->ticket_revoke; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Revoking", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK__XA_GRANTED, false, cib); pcmk__assert(rc == pcmk_rc_ok); } for (iter = injections->ticket_standby; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Standby", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK_XA_STANDBY, true, cib); pcmk__assert(rc == 
pcmk_rc_ok); } for (iter = injections->ticket_activate; iter != NULL; iter = iter->next) { const char *ticket_id = (const char *) iter->data; out->message(out, "inject-modify-ticket", "Activating", ticket_id); rc = set_ticket_state_attr(out, ticket_id, PCMK_XA_STANDBY, false, cib); pcmk__assert(rc == pcmk_rc_ok); } for (iter = injections->op_inject; iter != NULL; iter = iter->next) { inject_action(out, (const char *) iter->data, cib, scheduler); } if (!out->is_quiet(out)) { out->end_list(out); } } void pcmk_free_injections(pcmk_injections_t *injections) { if (injections == NULL) { return; } g_list_free_full(injections->node_up, g_free); g_list_free_full(injections->node_down, g_free); g_list_free_full(injections->node_fail, g_free); g_list_free_full(injections->op_fail, g_free); g_list_free_full(injections->op_inject, g_free); g_list_free_full(injections->ticket_grant, g_free); g_list_free_full(injections->ticket_revoke, g_free); g_list_free_full(injections->ticket_standby, g_free); g_list_free_full(injections->ticket_activate, g_free); free(injections->quorum); free(injections->watchdog); free(injections); } diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c index 930c0d9cde..b0f8a23461 100644 --- a/lib/pacemaker/pcmk_output.c +++ b/lib/pacemaker/pcmk_output.c @@ -1,2720 +1,2720 @@ /* * Copyright 2019-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include // stonith__* #include #include #include #include #include static char * colocations_header(pcmk_resource_t *rsc, pcmk__colocation_t *cons, bool dependents) { char *retval = NULL; if (cons->primary_role > pcmk_role_started) { retval = pcmk__assert_asprintf("%s (score=%s, %s role=%s, id=%s)", rsc->id, pcmk_readable_score(cons->score), (dependents? 
"needs" : "with"), pcmk_role_text(cons->primary_role), cons->id); } else { retval = pcmk__assert_asprintf("%s (score=%s, id=%s)", rsc->id, pcmk_readable_score(cons->score), cons->id); } return retval; } static void colocations_xml_node(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk__colocation_t *cons) { const char *score = pcmk_readable_score(cons->score); const char *dependent_role = NULL; const char *primary_role = NULL; if (cons->dependent_role != pcmk_role_unknown) { dependent_role = pcmk_role_text(cons->dependent_role); } if (cons->primary_role != pcmk_role_unknown) { primary_role = pcmk_role_text(cons->primary_role); } pcmk__output_create_xml_node(out, PCMK_XE_RSC_COLOCATION, PCMK_XA_ID, cons->id, PCMK_XA_RSC, cons->dependent->id, PCMK_XA_WITH_RSC, cons->primary->id, PCMK_XA_SCORE, score, PCMK_XA_NODE_ATTRIBUTE, cons->node_attribute, PCMK_XA_RSC_ROLE, dependent_role, PCMK_XA_WITH_RSC_ROLE, primary_role, NULL); } static int do_locations_list_xml(pcmk__output_t *out, pcmk_resource_t *rsc, bool add_header) { GList *lpc = NULL; int rc = pcmk_rc_no_output; for (lpc = rsc->priv->location_constraints; lpc != NULL; lpc = lpc->next) { pcmk__location_t *cons = lpc->data; GList *lpc2 = NULL; for (lpc2 = cons->nodes; lpc2 != NULL; lpc2 = lpc2->next) { pcmk_node_t *node = (pcmk_node_t *) lpc2->data; if (add_header) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "locations"); } pcmk__output_create_xml_node(out, PCMK_XE_RSC_LOCATION, PCMK_XA_NODE, node->priv->name, PCMK_XA_RSC, rsc->id, PCMK_XA_ID, cons->id, PCMK_XA_SCORE, pcmk_readable_score(node->assign->score), NULL); } } if (add_header) { PCMK__OUTPUT_LIST_FOOTER(out, rc); } return rc; } PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pcmk_resource_t *", "pcmk_node_t *", "pcmk_node_t *", "pcmk_action_t *", "pcmk_action_t *") static int rsc_action_item(pcmk__output_t *out, va_list args) { const char *change = va_arg(args, const char *); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *origin = va_arg(args, pcmk_node_t *); pcmk_node_t *destination = va_arg(args, pcmk_node_t *); pcmk_action_t *action = va_arg(args, pcmk_action_t *); pcmk_action_t *source = va_arg(args, pcmk_action_t *); int len = 0; char *reason = NULL; char *details = NULL; bool same_host = false; bool same_role = false; bool need_role = false; static int rsc_width = 5; static int detail_width = 5; pcmk__assert((action != NULL) && ((destination != NULL) || (origin != NULL))); if (source == NULL) { source = action; } len = strlen(rsc->id); if (len > rsc_width) { rsc_width = len + 2; } if ((rsc->priv->orig_role > pcmk_role_started) || (rsc->priv->next_role > pcmk_role_unpromoted)) { need_role = true; } if (pcmk__same_node(origin, destination)) { same_host = true; } if (rsc->priv->orig_role == rsc->priv->next_role) { same_role = true; } if (need_role && (origin == NULL)) { /* Starting and promoting a promotable clone instance */ details = pcmk__assert_asprintf("%s -> %s %s", pcmk_role_text(rsc->priv->orig_role), pcmk_role_text(rsc->priv->next_role), pcmk__node_name(destination)); } else if (origin == NULL) { /* Starting a resource */ details = pcmk__assert_asprintf("%s", pcmk__node_name(destination)); } else if (need_role && (destination == NULL)) { /* Stopping a promotable clone instance */ details = pcmk__assert_asprintf("%s %s", pcmk_role_text(rsc->priv->orig_role), pcmk__node_name(origin)); } else if (destination == NULL) { /* Stopping a resource */ details = pcmk__assert_asprintf("%s", pcmk__node_name(origin)); } else if (need_role && same_role && 
same_host) { /* Recovering, restarting or re-promoting a promotable clone instance */ details = pcmk__assert_asprintf("%s %s", pcmk_role_text(rsc->priv->orig_role), pcmk__node_name(origin)); } else if (same_role && same_host) { /* Recovering or Restarting a normal resource */ details = pcmk__assert_asprintf("%s", pcmk__node_name(origin)); } else if (need_role && same_role) { /* Moving a promotable clone instance */ details = pcmk__assert_asprintf("%s -> %s %s", pcmk__node_name(origin), pcmk__node_name(destination), pcmk_role_text(rsc->priv->orig_role)); } else if (same_role) { /* Moving a normal resource */ details = pcmk__assert_asprintf("%s -> %s", pcmk__node_name(origin), pcmk__node_name(destination)); } else if (same_host) { /* Promoting or demoting a promotable clone instance */ details = pcmk__assert_asprintf("%s -> %s %s", pcmk_role_text(rsc->priv->orig_role), pcmk_role_text(rsc->priv->next_role), pcmk__node_name(origin)); } else { /* Moving and promoting/demoting */ details = pcmk__assert_asprintf("%s %s -> %s %s", pcmk_role_text(rsc->priv->orig_role), pcmk__node_name(origin), pcmk_role_text(rsc->priv->next_role), pcmk__node_name(destination)); } len = strlen(details); if (len > detail_width) { detail_width = len; } if ((source->reason != NULL) && !pcmk__is_set(action->flags, pcmk__action_runnable)) { reason = pcmk__assert_asprintf("due to %s (blocked)", source->reason); } else if (source->reason) { reason = pcmk__assert_asprintf("due to %s", source->reason); } else if (!pcmk__is_set(action->flags, pcmk__action_runnable)) { reason = strdup("blocked"); } out->list_item(out, NULL, "%-8s %-*s ( %*s )%s%s", change, rsc_width, rsc->id, detail_width, details, ((reason == NULL)? "" : " "), pcmk__s(reason, "")); free(details); free(reason); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pcmk_resource_t *", "pcmk_node_t *", "pcmk_node_t *", "pcmk_action_t *", "pcmk_action_t *") static int rsc_action_item_xml(pcmk__output_t *out, va_list args) { const char *change = va_arg(args, const char *); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *origin = va_arg(args, pcmk_node_t *); pcmk_node_t *destination = va_arg(args, pcmk_node_t *); pcmk_action_t *action = va_arg(args, pcmk_action_t *); pcmk_action_t *source = va_arg(args, pcmk_action_t *); char *change_str = NULL; bool same_host = false; bool same_role = false; bool need_role = false; xmlNode *xml = NULL; pcmk__assert((action != NULL) && ((destination != NULL) || (origin != NULL))); if (source == NULL) { source = action; } if ((rsc->priv->orig_role > pcmk_role_started) || (rsc->priv->next_role > pcmk_role_unpromoted)) { need_role = true; } if (pcmk__same_node(origin, destination)) { same_host = true; } if (rsc->priv->orig_role == rsc->priv->next_role) { same_role = true; } change_str = g_ascii_strdown(change, -1); xml = pcmk__output_create_xml_node(out, PCMK_XE_RSC_ACTION, PCMK_XA_ACTION, change_str, PCMK_XA_RESOURCE, rsc->id, NULL); g_free(change_str); if (need_role && (origin == NULL)) { /* Starting and promoting a promotable clone instance */ pcmk__xe_set_props(xml, PCMK_XA_ROLE, pcmk_role_text(rsc->priv->orig_role), PCMK_XA_NEXT_ROLE, pcmk_role_text(rsc->priv->next_role), PCMK_XA_DEST, destination->priv->name, NULL); } else if (origin == NULL) { /* Starting a resource */ pcmk__xe_set(xml, PCMK_XA_NODE, destination->priv->name); } else if (need_role && (destination == NULL)) { /* Stopping a promotable clone instance */ pcmk__xe_set_props(xml, PCMK_XA_ROLE, 
pcmk_role_text(rsc->priv->orig_role), PCMK_XA_NODE, origin->priv->name, NULL); } else if (destination == NULL) { /* Stopping a resource */ pcmk__xe_set(xml, PCMK_XA_NODE, origin->priv->name); } else if (need_role && same_role && same_host) { /* Recovering, restarting or re-promoting a promotable clone instance */ pcmk__xe_set_props(xml, PCMK_XA_ROLE, pcmk_role_text(rsc->priv->orig_role), PCMK_XA_SOURCE, origin->priv->name, NULL); } else if (same_role && same_host) { /* Recovering or Restarting a normal resource */ pcmk__xe_set(xml, PCMK_XA_SOURCE, origin->priv->name); } else if (need_role && same_role) { /* Moving a promotable clone instance */ pcmk__xe_set_props(xml, PCMK_XA_SOURCE, origin->priv->name, PCMK_XA_DEST, destination->priv->name, PCMK_XA_ROLE, pcmk_role_text(rsc->priv->orig_role), NULL); } else if (same_role) { /* Moving a normal resource */ pcmk__xe_set_props(xml, PCMK_XA_SOURCE, origin->priv->name, PCMK_XA_DEST, destination->priv->name, NULL); } else if (same_host) { /* Promoting or demoting a promotable clone instance */ pcmk__xe_set_props(xml, PCMK_XA_ROLE, pcmk_role_text(rsc->priv->orig_role), PCMK_XA_NEXT_ROLE, pcmk_role_text(rsc->priv->next_role), PCMK_XA_SOURCE, origin->priv->name, NULL); } else { /* Moving and promoting/demoting */ pcmk__xe_set_props(xml, PCMK_XA_ROLE, pcmk_role_text(rsc->priv->orig_role), PCMK_XA_SOURCE, origin->priv->name, PCMK_XA_NEXT_ROLE, pcmk_role_text(rsc->priv->next_role), PCMK_XA_DEST, destination->priv->name, NULL); } if ((source->reason != NULL) && !pcmk__is_set(action->flags, pcmk__action_runnable)) { pcmk__xe_set_props(xml, PCMK_XA_REASON, source->reason, PCMK_XA_BLOCKED, PCMK_VALUE_TRUE, NULL); } else if (source->reason != NULL) { pcmk__xe_set(xml, PCMK_XA_REASON, source->reason); } else if (!pcmk__is_set(action->flags, pcmk__action_runnable)) { - pcmk__xe_set_bool_attr(xml, PCMK_XA_BLOCKED, true); + pcmk__xe_set_bool(xml, PCMK_XA_BLOCKED, true); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pcmk_resource_t *", "bool") static int rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk__is_set(rsc->flags, pcmk__rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use * rsc->private->this_with_colocations directly rather than call * rsc->private->cmds->this_with_colocations(). 
*/ pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop); for (GList *lpc = rsc->priv->this_with_colocations; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; char *hdr = NULL; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources %s is colocated with", rsc->id); if (pcmk__is_set(cons->primary->flags, pcmk__rsc_detect_loop)) { out->list_item(out, NULL, "%s (id=%s - loop)", cons->primary->id, cons->id); continue; } hdr = colocations_header(cons->primary, cons, false); out->list_item(out, NULL, "%s", hdr); free(hdr); // Empty list header for indentation of information about this resource out->begin_list(out, NULL, NULL, NULL); out->message(out, "locations-list", cons->primary); if (recursive) { out->message(out, "rsc-is-colocated-with-list", cons->primary, recursive); } out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pcmk_resource_t *", "bool") static int rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk__is_set(rsc->flags, pcmk__rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use * rsc->private->this_with_colocations directly rather than call * rsc->private->cmds->this_with_colocations(). */ pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop); for (GList *lpc = rsc->priv->this_with_colocations; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; if (pcmk__is_set(cons->primary->flags, pcmk__rsc_detect_loop)) { colocations_xml_node(out, cons->primary, cons); continue; } colocations_xml_node(out, cons->primary, cons); do_locations_list_xml(out, cons->primary, false); if (recursive) { out->message(out, "rsc-is-colocated-with-list", cons->primary, recursive); } } return rc; } PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pcmk_resource_t *", "bool") static int rscs_colocated_with_list(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk__is_set(rsc->flags, pcmk__rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use * rsc->private->with_this_colocations directly rather than * rsc->private->cmds->with_this_colocations(). 
*/ pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop); for (GList *lpc = rsc->priv->with_this_colocations; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; char *hdr = NULL; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources colocated with %s", rsc->id); if (pcmk__is_set(cons->dependent->flags, pcmk__rsc_detect_loop)) { out->list_item(out, NULL, "%s (id=%s - loop)", cons->dependent->id, cons->id); continue; } hdr = colocations_header(cons->dependent, cons, true); out->list_item(out, NULL, "%s", hdr); free(hdr); // Empty list header for indentation of information about this resource out->begin_list(out, NULL, NULL, NULL); out->message(out, "locations-list", cons->dependent); if (recursive) { out->message(out, "rscs-colocated-with-list", cons->dependent, recursive); } out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pcmk_resource_t *", "bool") static int rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); bool recursive = va_arg(args, int); int rc = pcmk_rc_no_output; if (pcmk__is_set(rsc->flags, pcmk__rsc_detect_loop)) { return rc; } /* We're listing constraints explicitly involving rsc, so use * rsc->private->with_this_colocations directly rather than * rsc->private->cmds->with_this_colocations(). */ pcmk__set_rsc_flags(rsc, pcmk__rsc_detect_loop); for (GList *lpc = rsc->priv->with_this_colocations; lpc != NULL; lpc = lpc->next) { pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data; if (pcmk__is_set(cons->dependent->flags, pcmk__rsc_detect_loop)) { colocations_xml_node(out, cons->dependent, cons); continue; } colocations_xml_node(out, cons->dependent, cons); do_locations_list_xml(out, cons->dependent, false); if (recursive) { out->message(out, "rscs-colocated-with-list", cons->dependent, recursive); } } return rc; } PCMK__OUTPUT_ARGS("locations-list", "pcmk_resource_t *") static int locations_list(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); GList *lpc = NULL; int rc = pcmk_rc_no_output; for (lpc = rsc->priv->location_constraints; lpc != NULL; lpc = lpc->next) { pcmk__location_t *cons = lpc->data; GList *lpc2 = NULL; for (lpc2 = cons->nodes; lpc2 != NULL; lpc2 = lpc2->next) { pcmk_node_t *node = (pcmk_node_t *) lpc2->data; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Locations"); out->list_item(out, NULL, "Node %s (score=%s, id=%s, rsc=%s)", pcmk__node_name(node), pcmk_readable_score(node->assign->score), cons->id, rsc->id); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("locations-list", "pcmk_resource_t *") static int locations_list_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); return do_locations_list_xml(out, rsc, true); } PCMK__OUTPUT_ARGS("locations-and-colocations", "pcmk_resource_t *", "bool", "bool") static int locations_and_colocations(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); bool recursive = va_arg(args, int); bool force = va_arg(args, int); pcmk__unpack_constraints(rsc->priv->scheduler); // Constraints apply to group/clone, not member/instance if (!force) { rsc = uber_parent(rsc); } out->message(out, "locations-list", rsc); pe__clear_resource_flags_on_all(rsc->priv->scheduler, pcmk__rsc_detect_loop); out->message(out, "rscs-colocated-with-list", rsc, recursive); pe__clear_resource_flags_on_all(rsc->priv->scheduler, 
pcmk__rsc_detect_loop); out->message(out, "rsc-is-colocated-with-list", rsc, recursive); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("locations-and-colocations", "pcmk_resource_t *", "bool", "bool") static int locations_and_colocations_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); bool recursive = va_arg(args, int); bool force = va_arg(args, int); pcmk__unpack_constraints(rsc->priv->scheduler); // Constraints apply to group/clone, not member/instance if (!force) { rsc = uber_parent(rsc); } pcmk__output_xml_create_parent(out, PCMK_XE_CONSTRAINTS, NULL); do_locations_list_xml(out, rsc, false); pe__clear_resource_flags_on_all(rsc->priv->scheduler, pcmk__rsc_detect_loop); out->message(out, "rscs-colocated-with-list", rsc, recursive); pe__clear_resource_flags_on_all(rsc->priv->scheduler, pcmk__rsc_detect_loop); out->message(out, "rsc-is-colocated-with-list", rsc, recursive); pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health(pcmk__output_t *out, va_list args) { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); const char *host_from = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result = va_arg(args, const char *); return out->info(out, "Controller on %s in state %s: %s", pcmk__s(host_from, "unknown node"), pcmk__s(fsa_state, "unknown"), pcmk__s(result, "unknown result")); } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return health(out, args); } else { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); const char *host_from G_GNUC_UNUSED = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result G_GNUC_UNUSED = va_arg(args, const char *); if (fsa_state != NULL) { pcmk__formatted_printf(out, "%s\n", fsa_state); return pcmk_rc_ok; } } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *") static int health_xml(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); const char *host_from = va_arg(args, const char *); const char *fsa_state = va_arg(args, const char *); const char *result = va_arg(args, const char *); pcmk__output_create_xml_node(out, pcmk__s(sys_from, ""), PCMK_XA_NODE_NAME, pcmk__s(host_from, ""), PCMK_XA_STATE, pcmk__s(fsa_state, ""), PCMK_XA_RESULT, pcmk__s(result, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = va_arg(args, time_t); char *last_updated_s = NULL; int rc = pcmk_rc_ok; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = PCMK__SERVER_REMOTED; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk__pcmkd_state_enum2friendly(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } rc = out->info(out, "Status of %s: '%s' (last updated %s)", sys_from, state_s, 
pcmk__s(last_updated_s, "at unknown time")); free(last_updated_s); return rc; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health_html(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = va_arg(args, time_t); char *last_updated_s = NULL; char *msg = NULL; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = PCMK__SERVER_REMOTED; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk__pcmkd_state_enum2friendly(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } msg = pcmk__assert_asprintf("Status of %s: '%s' (last updated %s)", sys_from, state_s, pcmk__s(last_updated_s, "at unknown time")); pcmk__output_create_html_node(out, "li", NULL, NULL, msg); free(msg); free(last_updated_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return pacemakerd_health(out, args); } else { const char *sys_from G_GNUC_UNUSED = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated G_GNUC_UNUSED = va_arg(args, time_t); if (state_s == NULL) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state); } pcmk__formatted_printf(out, "%s\n", state_s); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("pacemakerd-health", "const char *", "enum pcmk_pacemakerd_state", "const char *", "time_t") static int pacemakerd_health_xml(pcmk__output_t *out, va_list args) { const char *sys_from = va_arg(args, const char *); enum pcmk_pacemakerd_state state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = va_arg(args, const char *); time_t last_updated = va_arg(args, time_t); char *last_updated_s = NULL; if (sys_from == NULL) { if (state == pcmk_pacemakerd_state_remote) { sys_from = PCMK__SERVER_REMOTED; } else { sys_from = CRM_SYSTEM_MCP; } } if (state_s == NULL) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(state); } if (last_updated != 0) { last_updated_s = pcmk__epoch2str(&last_updated, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); } pcmk__output_create_xml_node(out, PCMK_XE_PACEMAKERD, PCMK_XA_SYS_FROM, sys_from, PCMK_XA_STATE, state_s, PCMK_XA_LAST_UPDATED, last_updated_s, NULL); free(last_updated_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("profile", "const char *", "clock_t", "clock_t") static int profile_default(pcmk__output_t *out, va_list args) { const char *xml_file = va_arg(args, const char *); clock_t start = va_arg(args, clock_t); clock_t end = va_arg(args, clock_t); out->list_item(out, NULL, "Testing %s ... 
%.2f secs", xml_file, (end - start) / (float) CLOCKS_PER_SEC); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("profile", "const char *", "clock_t", "clock_t") static int profile_xml(pcmk__output_t *out, va_list args) { const char *xml_file = va_arg(args, const char *); clock_t start = va_arg(args, clock_t); clock_t end = va_arg(args, clock_t); char *duration = pcmk__ftoa((end - start) / (float) CLOCKS_PER_SEC); pcmk__output_create_xml_node(out, PCMK_XE_TIMING, PCMK_XA_FILE, xml_file, PCMK_XA_DURATION, duration, NULL); free(duration); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc(pcmk__output_t *out, va_list args) { const char *dc = va_arg(args, const char *); return out->info(out, "Designated Controller is: %s", pcmk__s(dc, "not yet elected")); } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return dc(out, args); } else { const char *dc = va_arg(args, const char *); if (dc != NULL) { pcmk__formatted_printf(out, "%s\n", pcmk__s(dc, "")); return pcmk_rc_ok; } } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("dc", "const char *") static int dc_xml(pcmk__output_t *out, va_list args) { const char *dc = va_arg(args, const char *); pcmk__output_create_xml_node(out, PCMK_XE_DC, PCMK_XA_NODE_NAME, pcmk__s(dc, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node(pcmk__output_t *out, va_list args) { const char *type = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id = va_arg(args, const char *); bool bash_export = va_arg(args, int); if (bash_export) { return out->info(out, "export %s=%s", pcmk__s(name, ""), pcmk__s(id, "")); } else { return out->info(out, "%s node: %s (%s)", type ? 
type : "cluster", pcmk__s(name, ""), pcmk__s(id, "")); } } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node_text(pcmk__output_t *out, va_list args) { if (!out->is_quiet(out)) { return crmadmin_node(out, args); } else { const char *type G_GNUC_UNUSED = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id G_GNUC_UNUSED = va_arg(args, const char *); bool bash_export G_GNUC_UNUSED = va_arg(args, int); pcmk__formatted_printf(out, "%s\n", pcmk__s(name, "")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool") static int crmadmin_node_xml(pcmk__output_t *out, va_list args) { const char *type = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *id = va_arg(args, const char *); bool bash_export G_GNUC_UNUSED = va_arg(args, int); pcmk__output_create_xml_node(out, PCMK_XE_NODE, PCMK_XA_TYPE, pcmk__s(type, "cluster"), PCMK_XA_NAME, pcmk__s(name, ""), PCMK_XA_ID, pcmk__s(id, ""), NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("digests", "const pcmk_resource_t *", "const pcmk_node_t *", "const char *", "guint", "const pcmk__op_digest_t *") static int digests_text(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); const pcmk_node_t *node = va_arg(args, const pcmk_node_t *); const char *task = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); const pcmk__op_digest_t *digests = va_arg(args, const pcmk__op_digest_t *); char *action_desc = NULL; const char *rsc_desc = "unknown resource"; const char *node_desc = "unknown node"; if (interval_ms != 0) { action_desc = pcmk__assert_asprintf("%ums-interval %s action", interval_ms, pcmk__s(task, "unknown")); } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none)) { action_desc = strdup("probe action"); } else { action_desc = pcmk__assert_asprintf("%s action", pcmk__s(task, "unknown")); } if ((rsc != NULL) && (rsc->id != NULL)) { rsc_desc = rsc->id; } if ((node != NULL) && (node->priv->name != NULL)) { node_desc = node->priv->name; } out->begin_list(out, NULL, NULL, "Digests for %s %s on %s", rsc_desc, action_desc, node_desc); free(action_desc); if (digests == NULL) { out->list_item(out, NULL, "none"); out->end_list(out); return pcmk_rc_ok; } if (digests->digest_all_calc != NULL) { out->list_item(out, NULL, "%s (all parameters)", digests->digest_all_calc); } if (digests->digest_secure_calc != NULL) { out->list_item(out, NULL, "%s (non-private parameters)", digests->digest_secure_calc); } if (digests->digest_restart_calc != NULL) { out->list_item(out, NULL, "%s (non-reloadable parameters)", digests->digest_restart_calc); } out->end_list(out); return pcmk_rc_ok; } static void add_digest_xml(xmlNode *parent, const char *type, const char *digest, xmlNode *digest_source) { if (digest != NULL) { xmlNodePtr digest_xml = pcmk__xe_create(parent, PCMK_XE_DIGEST); pcmk__xe_set(digest_xml, PCMK_XA_TYPE, pcmk__s(type, "unspecified")); pcmk__xe_set(digest_xml, PCMK_XA_HASH, digest); pcmk__xml_copy(digest_xml, digest_source); } } PCMK__OUTPUT_ARGS("digests", "const pcmk_resource_t *", "const pcmk_node_t *", "const char *", "guint", "const pcmk__op_digest_t *") static int digests_xml(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); const pcmk_node_t *node = va_arg(args, const pcmk_node_t *); const char *task = va_arg(args, const char *); guint 
interval_ms = va_arg(args, guint); const pcmk__op_digest_t *digests = va_arg(args, const pcmk__op_digest_t *); char *interval_s = pcmk__assert_asprintf("%ums", interval_ms); xmlNode *xml = NULL; xml = pcmk__output_create_xml_node(out, PCMK_XE_DIGESTS, PCMK_XA_RESOURCE, pcmk__s(rsc->id, ""), PCMK_XA_NODE, pcmk__s(node->priv->name, ""), PCMK_XA_TASK, pcmk__s(task, ""), PCMK_XA_INTERVAL, interval_s, NULL); free(interval_s); if (digests != NULL) { add_digest_xml(xml, "all", digests->digest_all_calc, digests->params_all); add_digest_xml(xml, "nonprivate", digests->digest_secure_calc, digests->params_secure); add_digest_xml(xml, "nonreloadable", digests->digest_restart_calc, digests->params_restart); } return pcmk_rc_ok; } #define STOP_SANITY_ASSERT(lineno) do { \ if ((current != NULL) && current->details->unclean) { \ /* It will be a pseudo op */ \ } else if (stop == NULL) { \ crm_err("%s:%d: No stop action exists for %s", \ __func__, lineno, rsc->id); \ pcmk__assert(stop != NULL); \ } else if (pcmk__is_set(stop->flags, pcmk__action_optional)) { \ crm_err("%s:%d: Action %s is still optional", \ __func__, lineno, stop->uuid); \ pcmk__assert(!pcmk__is_set(stop->flags, \ pcmk__action_optional)); \ } \ } while (0) PCMK__OUTPUT_ARGS("rsc-action", "pcmk_resource_t *", "pcmk_node_t *", "pcmk_node_t *") static int rsc_action_default(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *current = va_arg(args, pcmk_node_t *); pcmk_node_t *next = va_arg(args, pcmk_node_t *); GList *possible_matches = NULL; char *key = NULL; int rc = pcmk_rc_no_output; bool moving = false; pcmk_node_t *start_node = NULL; pcmk_action_t *start = NULL; pcmk_action_t *stop = NULL; pcmk_action_t *promote = NULL; pcmk_action_t *demote = NULL; pcmk_action_t *reason_op = NULL; if (!pcmk__is_set(rsc->flags, pcmk__rsc_managed) || ((current == NULL) && (next == NULL))) { const bool managed = pcmk__is_set(rsc->flags, pcmk__rsc_managed); pcmk__rsc_info(rsc, "Leave %s\t(%s%s)", rsc->id, pcmk_role_text(rsc->priv->orig_role), (managed? "" : " unmanaged")); return rc; } moving = (current != NULL) && (next != NULL) && !pcmk__same_node(current, next); possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_START, false); if (possible_matches) { start = possible_matches->data; g_list_free(possible_matches); } if ((start == NULL) || !pcmk__is_set(start->flags, pcmk__action_runnable)) { start_node = NULL; } else { start_node = current; } possible_matches = pe__resource_actions(rsc, start_node, PCMK_ACTION_STOP, false); if (possible_matches) { stop = possible_matches->data; g_list_free(possible_matches); } else if (pcmk__is_set(rsc->flags, pcmk__rsc_stop_unexpected)) { /* The resource is multiply active with PCMK_META_MULTIPLE_ACTIVE set to * PCMK_VALUE_STOP_UNEXPECTED, and not stopping on its current node, but * it should be stopping elsewhere. 
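 * In that case, look for a stop scheduled on any node (hence the NULL
 * node argument in the lookup below).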
*/ possible_matches = pe__resource_actions(rsc, NULL, PCMK_ACTION_STOP, false); if (possible_matches != NULL) { stop = possible_matches->data; g_list_free(possible_matches); } } possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_PROMOTE, false); if (possible_matches) { promote = possible_matches->data; g_list_free(possible_matches); } possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_DEMOTE, false); if (possible_matches) { demote = possible_matches->data; g_list_free(possible_matches); } if (rsc->priv->orig_role == rsc->priv->next_role) { pcmk_action_t *migrate_op = NULL; CRM_CHECK(next != NULL, return rc); possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_MIGRATE_FROM, false); if (possible_matches) { migrate_op = possible_matches->data; } if ((migrate_op != NULL) && (current != NULL) && pcmk__is_set(migrate_op->flags, pcmk__action_runnable)) { rc = out->message(out, "rsc-action-item", "Migrate", rsc, current, next, start, NULL); } else if (pcmk__is_set(rsc->flags, pcmk__rsc_reload)) { rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next, start, NULL); } else if ((start == NULL) || pcmk__is_set(start->flags, pcmk__action_optional)) { if ((demote != NULL) && (promote != NULL) && !pcmk__is_set(demote->flags, pcmk__action_optional) && !pcmk__is_set(promote->flags, pcmk__action_optional)) { rc = out->message(out, "rsc-action-item", "Re-promote", rsc, current, next, promote, demote); } else { pcmk__rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, pcmk_role_text(rsc->priv->orig_role), pcmk__node_name(next)); } } else if (!pcmk__is_set(start->flags, pcmk__action_runnable)) { if ((stop == NULL) || (stop->reason == NULL)) { reason_op = start; } else { reason_op = stop; } rc = out->message(out, "rsc-action-item", "Stop", rsc, current, NULL, stop, reason_op); STOP_SANITY_ASSERT(__LINE__); } else if (moving && current) { const bool failed = pcmk__is_set(rsc->flags, pcmk__rsc_failed); rc = out->message(out, "rsc-action-item", (failed? 
"Recover" : "Move"), rsc, current, next, stop, NULL); } else if (pcmk__is_set(rsc->flags, pcmk__rsc_failed)) { rc = out->message(out, "rsc-action-item", "Recover", rsc, current, NULL, stop, NULL); STOP_SANITY_ASSERT(__LINE__); } else { rc = out->message(out, "rsc-action-item", "Restart", rsc, current, next, start, NULL); #if 0 /* @TODO This can be reached in situations that should really be * "Start" (see for example the migrate-fail-7 regression test) */ STOP_SANITY_ASSERT(__LINE__); #endif } g_list_free(possible_matches); return rc; } if ((stop != NULL) && ((rsc->priv->next_role == pcmk_role_stopped) || ((start != NULL) && !pcmk__is_set(start->flags, pcmk__action_runnable)))) { key = stop_key(rsc); for (GList *iter = rsc->priv->active_nodes; iter != NULL; iter = iter->next) { pcmk_node_t *node = iter->data; pcmk_action_t *stop_op = NULL; reason_op = start; possible_matches = find_actions(rsc->priv->actions, key, node); if (possible_matches) { stop_op = possible_matches->data; g_list_free(possible_matches); } if (stop_op != NULL) { if (pcmk__is_set(stop_op->flags, pcmk__action_runnable)) { STOP_SANITY_ASSERT(__LINE__); } if (stop_op->reason != NULL) { reason_op = stop_op; } } if (out->message(out, "rsc-action-item", "Stop", rsc, node, NULL, stop_op, reason_op) == pcmk_rc_ok) { rc = pcmk_rc_ok; } } free(key); } else if ((stop != NULL) && pcmk__all_flags_set(rsc->flags, pcmk__rsc_failed |pcmk__rsc_stop_if_failed)) { /* 'stop' may be NULL if the failure was ignored */ rc = out->message(out, "rsc-action-item", "Recover", rsc, current, next, stop, start); STOP_SANITY_ASSERT(__LINE__); } else if (moving) { rc = out->message(out, "rsc-action-item", "Move", rsc, current, next, stop, NULL); STOP_SANITY_ASSERT(__LINE__); } else if (pcmk__is_set(rsc->flags, pcmk__rsc_reload)) { rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next, start, NULL); } else if ((stop != NULL) && !pcmk__is_set(stop->flags, pcmk__action_optional)) { rc = out->message(out, "rsc-action-item", "Restart", rsc, current, next, start, NULL); STOP_SANITY_ASSERT(__LINE__); } else if (rsc->priv->orig_role == pcmk_role_promoted) { CRM_LOG_ASSERT(current != NULL); rc = out->message(out, "rsc-action-item", "Demote", rsc, current, next, demote, NULL); } else if (rsc->priv->next_role == pcmk_role_promoted) { CRM_LOG_ASSERT(next); rc = out->message(out, "rsc-action-item", "Promote", rsc, current, next, promote, NULL); } else if ((rsc->priv->orig_role == pcmk_role_stopped) && (rsc->priv->next_role > pcmk_role_stopped)) { rc = out->message(out, "rsc-action-item", "Start", rsc, current, next, start, NULL); } return rc; } PCMK__OUTPUT_ARGS("node-action", "const char *", "const char *", "const char *") static int node_action(pcmk__output_t *out, va_list args) { const char *task = va_arg(args, const char *); const char *node_name = va_arg(args, const char *); const char *reason = va_arg(args, const char *); if (task == NULL) { return pcmk_rc_no_output; } else if (reason) { out->list_item(out, NULL, "%s %s '%s'", task, node_name, reason); } else { crm_notice(" * %s %s", task, node_name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-action", "const char *", "const char *", "const char *") static int node_action_xml(pcmk__output_t *out, va_list args) { const char *task = va_arg(args, const char *); const char *node_name = va_arg(args, const char *); const char *reason = va_arg(args, const char *); if (task == NULL) { return pcmk_rc_no_output; } else if (reason) { pcmk__output_create_xml_node(out, PCMK_XE_NODE_ACTION, 
PCMK_XA_TASK, task, PCMK_XA_NODE, node_name, PCMK_XA_REASON, reason, NULL); } else { crm_notice(" * %s %s", task, node_name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *", "const char *", "bool", "bool") static int node_info_default(pcmk__output_t *out, va_list args) { uint32_t node_id = va_arg(args, uint32_t); const char *node_name = va_arg(args, const char *); const char *uuid = va_arg(args, const char *); const char *state = va_arg(args, const char *); bool have_quorum = (bool) va_arg(args, int); bool is_remote = (bool) va_arg(args, int); return out->info(out, "Node %" PRIu32 ": %s " "(uuid=%s, state=%s, have_quorum=%s, is_remote=%s)", node_id, pcmk__s(node_name, "unknown"), pcmk__s(uuid, "unknown"), pcmk__s(state, "unknown"), pcmk__btoa(have_quorum), pcmk__btoa(is_remote)); } PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *", "const char *", "bool", "bool") static int node_info_xml(pcmk__output_t *out, va_list args) { uint32_t node_id = va_arg(args, uint32_t); const char *node_name = va_arg(args, const char *); const char *uuid = va_arg(args, const char *); const char *state = va_arg(args, const char *); bool have_quorum = (bool) va_arg(args, int); bool is_remote = (bool) va_arg(args, int); char *id_s = pcmk__assert_asprintf("%" PRIu32, node_id); pcmk__output_create_xml_node(out, PCMK_XE_NODE_INFO, PCMK_XA_NODEID, id_s, PCMK_XA_UNAME, node_name, PCMK_XA_ID, uuid, PCMK_XA_CRMD, state, PCMK_XA_HAVE_QUORUM, pcmk__btoa(have_quorum), PCMK_XA_REMOTE_NODE, pcmk__btoa(is_remote), NULL); free(id_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNode *") static int inject_cluster_action(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr rsc = va_arg(args, xmlNodePtr); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (rsc != NULL) { out->list_item(out, NULL, "Cluster action: %s for %s on %s", task, pcmk__xe_id(rsc), node); } else { out->list_item(out, NULL, "Cluster action: %s on %s", task, node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNode *") static int inject_cluster_action_xml(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr rsc = va_arg(args, xmlNodePtr); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, PCMK_XE_CLUSTER_ACTION, PCMK_XA_TASK, task, PCMK_XA_NODE, node, NULL); if (rsc) { pcmk__xe_set(xml_node, PCMK_XA_ID, pcmk__xe_id(rsc)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-fencing-action", "const char *", "const char *") static int inject_fencing_action(pcmk__output_t *out, va_list args) { const char *target = va_arg(args, const char *); const char *op = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Fencing %s (%s)", target, op); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-fencing-action", "const char *", "const char *") static int inject_fencing_action_xml(pcmk__output_t *out, va_list args) { const char *target = va_arg(args, const char *); const char *op = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, PCMK_XE_FENCING_ACTION, PCMK_XA_TARGET, target, PCMK_XA_OP, op, NULL); return pcmk_rc_ok; 
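/* For the "node-info" message defined earlier in this hunk, the argument
 * order is node ID, node name, UUID, state, have_quorum, and is_remote; the
 * two booleans travel through the varargs interface as int.  A usage sketch
 * with placeholder values:
 *
 *     out->message(out, "node-info", (uint32_t) 1, "node-1", "1",
 *                  "member", true, false);
 */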
} PCMK__OUTPUT_ARGS("inject-attr", "const char *", "const char *", "xmlNode *") static int inject_attr(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); xmlNodePtr cib_node = va_arg(args, xmlNodePtr); xmlChar *node_path = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node_path = xmlGetNodePath(cib_node); out->list_item(out, NULL, "Injecting attribute %s=%s into %s '%s'", name, value, node_path, pcmk__xe_id(cib_node)); free(node_path); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-attr", "const char *", "const char *", "xmlNode *") static int inject_attr_xml(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); xmlNodePtr cib_node = va_arg(args, xmlNodePtr); xmlChar *node_path = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node_path = xmlGetNodePath(cib_node); pcmk__output_create_xml_node(out, PCMK_XE_INJECT_ATTR, PCMK_XA_NAME, name, PCMK_XA_VALUE, value, PCMK_XA_NODE_PATH, node_path, PCMK_XA_CIB_NODE, pcmk__xe_id(cib_node), NULL); free(node_path); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-spec", "const char *") static int inject_spec(pcmk__output_t *out, va_list args) { const char *spec = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Injecting %s into the configuration", spec); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-spec", "const char *") static int inject_spec_xml(pcmk__output_t *out, va_list args) { const char *spec = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, PCMK_XE_INJECT_SPEC, PCMK_XA_SPEC, spec, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-config", "const char *", "const char *") static int inject_modify_config(pcmk__output_t *out, va_list args) { const char *quorum = va_arg(args, const char *); const char *watchdog = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->begin_list(out, NULL, NULL, "Performing Requested Modifications"); if (quorum) { out->list_item(out, NULL, "Setting quorum: %s", quorum); } if (watchdog) { out->list_item(out, NULL, "Setting watchdog: %s", watchdog); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-config", "const char *", "const char *") static int inject_modify_config_xml(pcmk__output_t *out, va_list args) { const char *quorum = va_arg(args, const char *); const char *watchdog = va_arg(args, const char *); xmlNodePtr node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } node = pcmk__output_xml_create_parent(out, PCMK_XE_MODIFICATIONS, NULL); if (quorum) { pcmk__xe_set(node, PCMK_XA_QUORUM, quorum); } if (watchdog) { pcmk__xe_set(node, PCMK_XA_WATCHDOG, watchdog); } pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-node", "const char *", "const char *") static int inject_modify_node(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *node = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (pcmk__str_eq(action, "Online", pcmk__str_none)) { out->list_item(out, NULL, "Bringing node %s online", node); return pcmk_rc_ok; } else if (pcmk__str_eq(action, "Offline", pcmk__str_none)) { out->list_item(out, NULL, "Taking node %s offline", node); return pcmk_rc_ok; } else if (pcmk__str_eq(action, "Failing", pcmk__str_none)) { 
out->list_item(out, NULL, "Failing node %s", node); return pcmk_rc_ok; } return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("inject-modify-node", "const char *", "const char *") static int inject_modify_node_xml(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *node = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, PCMK_XE_MODIFY_NODE, PCMK_XA_ACTION, action, PCMK_XA_NODE, node, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-ticket", "const char *", "const char *") static int inject_modify_ticket(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *ticket = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (pcmk__str_eq(action, "Standby", pcmk__str_none)) { out->list_item(out, NULL, "Making ticket %s standby", ticket); } else { out->list_item(out, NULL, "%s ticket %s", action, ticket); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-modify-ticket", "const char *", "const char *") static int inject_modify_ticket_xml(pcmk__output_t *out, va_list args) { const char *action = va_arg(args, const char *); const char *ticket = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } pcmk__output_create_xml_node(out, PCMK_XE_MODIFY_TICKET, PCMK_XA_ACTION, action, PCMK_XA_TICKET, ticket, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-pseudo-action", "const char *", "const char *") static int inject_pseudo_action(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); if (out->is_quiet(out)) { return pcmk_rc_no_output; } out->list_item(out, NULL, "Pseudo action: %s%s%s", task, ((node == NULL)? 
"" : " on "), pcmk__s(node, "")); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-pseudo-action", "const char *", "const char *") static int inject_pseudo_action_xml(pcmk__output_t *out, va_list args) { const char *node = va_arg(args, const char *); const char *task = va_arg(args, const char *); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, PCMK_XE_PSEUDO_ACTION, PCMK_XA_TASK, task, NULL); if (node) { pcmk__xe_set(xml_node, PCMK_XA_NODE, node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-rsc-action", "const char *", "const char *", "const char *", "guint") static int inject_rsc_action(pcmk__output_t *out, va_list args) { const char *rsc = va_arg(args, const char *); const char *operation = va_arg(args, const char *); const char *node = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); if (out->is_quiet(out)) { return pcmk_rc_no_output; } if (interval_ms) { out->list_item(out, NULL, "Resource action: %-15s %s=%u on %s", rsc, operation, interval_ms, node); } else { out->list_item(out, NULL, "Resource action: %-15s %s on %s", rsc, operation, node); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("inject-rsc-action", "const char *", "const char *", "const char *", "guint") static int inject_rsc_action_xml(pcmk__output_t *out, va_list args) { const char *rsc = va_arg(args, const char *); const char *operation = va_arg(args, const char *); const char *node = va_arg(args, const char *); guint interval_ms = va_arg(args, guint); xmlNodePtr xml_node = NULL; if (out->is_quiet(out)) { return pcmk_rc_no_output; } xml_node = pcmk__output_create_xml_node(out, PCMK_XE_RSC_ACTION, PCMK_XA_RESOURCE, rsc, PCMK_XA_OP, operation, PCMK_XA_NODE, node, NULL); if (interval_ms) { char *interval_s = pcmk__itoa(interval_ms); pcmk__xe_set(xml_node, PCMK_XA_INTERVAL, interval_s); free(interval_s); } return pcmk_rc_ok; } #define CHECK_RC(retcode, retval) \ if (retval == pcmk_rc_ok) { \ retcode = pcmk_rc_ok; \ } PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") int pcmk__cluster_status_text(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); int rc = pcmk_rc_no_output; bool already_printed_failure = false; CHECK_RC(rc, out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts, show_opts)); if (pcmk__is_set(section_opts, pcmk_section_nodes) && unames) { CHECK_RC(rc, out->message(out, "node-list", scheduler->nodes, unames, resources, show_opts, rc == pcmk_rc_ok)); } /* Print resources section, if needed */ if (pcmk__is_set(section_opts, pcmk_section_resources)) { CHECK_RC(rc, out->message(out, "resource-list", scheduler, show_opts, true, unames, resources, rc == pcmk_rc_ok)); } /* print Node Attributes section if requested */ if (pcmk__is_set(section_opts, pcmk_section_attributes)) { CHECK_RC(rc, out->message(out, 
"node-attribute-list", scheduler, show_opts, (rc == pcmk_rc_ok), unames, resources)); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk__any_flags_set(section_opts, pcmk_section_operations|pcmk_section_failcounts)) { CHECK_RC(rc, out->message(out, "node-summary", scheduler, unames, resources, section_opts, show_opts, (rc == pcmk_rc_ok))); } /* If there were any failed actions, print them */ if (pcmk__is_set(section_opts, pcmk_section_failures) && (scheduler->priv->failed != NULL) && (scheduler->priv->failed->children != NULL)) { CHECK_RC(rc, out->message(out, "failed-action-list", scheduler, unames, resources, show_opts, rc == pcmk_rc_ok)); } /* Print failed stonith actions */ if (pcmk__is_set(section_opts, pcmk_section_fence_failed) && fence_history != pcmk__fence_history_none) { if (history_rc == 0) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "failed-fencing-list", stonith_history, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } else { PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); already_printed_failure = true; } } /* Print tickets if requested */ if (pcmk__is_set(section_opts, pcmk_section_tickets)) { CHECK_RC(rc, out->message(out, "ticket-list", scheduler->priv->ticket_constraints, (rc == pcmk_rc_ok), false, false)); } /* Print negative location constraints if requested */ if (pcmk__is_set(section_opts, pcmk_section_bans)) { CHECK_RC(rc, out->message(out, "ban-list", scheduler, prefix, resources, show_opts, rc == pcmk_rc_ok)); } /* Print stonith history */ if (pcmk__any_flags_set(section_opts, pcmk_section_fencing_all) && (fence_history != pcmk__fence_history_none)) { if (history_rc != 0) { if (!already_printed_failure) { PCMK__OUTPUT_SPACER_IF(out, rc == pcmk_rc_ok); out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } else if (pcmk__is_set(section_opts, pcmk_section_fence_worked)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { CHECK_RC(rc, out->message(out, "fencing-list", hp, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } else if (pcmk__is_set(section_opts, pcmk_section_fence_pending)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { CHECK_RC(rc, out->message(out, "pending-fencing-list", hp, unames, section_opts, show_opts, rc == pcmk_rc_ok)); } } } return rc; } PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") static int cluster_status_xml(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, 
uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts, show_opts); /*** NODES ***/ if (pcmk__is_set(section_opts, pcmk_section_nodes)) { out->message(out, "node-list", scheduler->nodes, unames, resources, show_opts, false); } /* Print resources section, if needed */ if (pcmk__is_set(section_opts, pcmk_section_resources)) { /* XML output always displays full details. */ uint32_t full_show_opts = show_opts & ~pcmk_show_brief; out->message(out, "resource-list", scheduler, full_show_opts, false, unames, resources, false); } /* print Node Attributes section if requested */ if (pcmk__is_set(section_opts, pcmk_section_attributes)) { out->message(out, "node-attribute-list", scheduler, show_opts, false, unames, resources); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk__any_flags_set(section_opts, pcmk_section_operations|pcmk_section_failcounts)) { out->message(out, "node-summary", scheduler, unames, resources, section_opts, show_opts, false); } /* If there were any failed actions, print them */ if (pcmk__is_set(section_opts, pcmk_section_failures) && (scheduler->priv->failed != NULL) && (scheduler->priv->failed->children != NULL)) { out->message(out, "failed-action-list", scheduler, unames, resources, show_opts, false); } /* Print stonith history */ if (pcmk__is_set(section_opts, pcmk_section_fencing_all) && (fence_history != pcmk__fence_history_none)) { out->message(out, "full-fencing-list", history_rc, stonith_history, unames, section_opts, show_opts, false); } /* Print tickets if requested */ if (pcmk__is_set(section_opts, pcmk_section_tickets)) { out->message(out, "ticket-list", scheduler->priv->ticket_constraints, false, false, false); } /* Print negative location constraints if requested */ if (pcmk__is_set(section_opts, pcmk_section_bans)) { out->message(out, "ban-list", scheduler, prefix, resources, show_opts, false); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "crm_exit_t", "stonith_history_t *", "enum pcmk__fence_history", "uint32_t", "uint32_t", "const char *", "GList *", "GList *") static int cluster_status_html(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); crm_exit_t history_rc = va_arg(args, crm_exit_t); stonith_history_t *stonith_history = va_arg(args, stonith_history_t *); enum pcmk__fence_history fence_history = va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); const char *prefix = va_arg(args, const char *); GList *unames = va_arg(args, GList *); GList *resources = va_arg(args, GList *); bool already_printed_failure = false; out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts, show_opts); /*** NODE LIST ***/ if (pcmk__is_set(section_opts, pcmk_section_nodes) && (unames != NULL)) { out->message(out, "node-list", scheduler->nodes, unames, resources, show_opts, false); } /* Print resources section, if needed */ if (pcmk__is_set(section_opts, pcmk_section_resources)) { out->message(out, "resource-list", scheduler, show_opts, true, unames, resources, false); } /* print Node Attributes section if requested */ if 
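/* Two format-specific differences stand out here: the XML variant above
 * always clears pcmk_show_brief so resource details are never summarized, and
 * the XML and HTML variants pass false where the text variant passes
 * (rc == pcmk_rc_ok), because blank-line spacers only matter for text output.
 * The flag adjustment is plain bit masking, as in the code above:
 *
 *     uint32_t full_show_opts = show_opts & ~pcmk_show_brief;
 */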
(pcmk__is_set(section_opts, pcmk_section_attributes)) { out->message(out, "node-attribute-list", scheduler, show_opts, false, unames, resources); } /* If requested, print resource operations (which includes failcounts) * or just failcounts */ if (pcmk__any_flags_set(section_opts, pcmk_section_operations|pcmk_section_failcounts)) { out->message(out, "node-summary", scheduler, unames, resources, section_opts, show_opts, false); } /* If there were any failed actions, print them */ if (pcmk__is_set(section_opts, pcmk_section_failures) && (scheduler->priv->failed != NULL) && (scheduler->priv->failed->children != NULL)) { out->message(out, "failed-action-list", scheduler, unames, resources, show_opts, false); } /* Print failed stonith actions */ if (pcmk__is_set(section_opts, pcmk_section_fence_failed) && (fence_history != pcmk__fence_history_none)) { if (history_rc == 0) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "failed-fencing-list", stonith_history, unames, section_opts, show_opts, false); } } else { out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } /* Print stonith history */ if (pcmk__any_flags_set(section_opts, pcmk_section_fencing_all) && (fence_history != pcmk__fence_history_none)) { if (history_rc != 0) { if (!already_printed_failure) { out->begin_list(out, NULL, NULL, "Failed Fencing Actions"); out->list_item(out, NULL, "Failed to get fencing history: %s", crm_exit_str(history_rc)); out->end_list(out); } } else if (pcmk__is_set(section_opts, pcmk_section_fence_worked)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq, GINT_TO_POINTER(st_failed)); if (hp) { out->message(out, "fencing-list", hp, unames, section_opts, show_opts, false); } } else if (pcmk__is_set(section_opts, pcmk_section_fence_pending)) { stonith_history_t *hp = NULL; hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL); if (hp) { out->message(out, "pending-fencing-list", hp, unames, section_opts, show_opts, false); } } } /* Print tickets if requested */ if (pcmk__is_set(section_opts, pcmk_section_tickets)) { out->message(out, "ticket-list", scheduler->priv->ticket_constraints, false, false, false); } /* Print negative location constraints if requested */ if (pcmk__is_set(section_opts, pcmk_section_bans)) { out->message(out, "ban-list", scheduler, prefix, resources, show_opts, false); } return pcmk_rc_ok; } #define KV_PAIR(k, v) do { \ if (legacy) { \ pcmk__g_strcat(s, k "=", pcmk__s(v, ""), " ", NULL); \ } else { \ pcmk__g_strcat(s, k "=\"", pcmk__s(v, ""), "\" ", NULL); \ } \ } while (0) PCMK__OUTPUT_ARGS("attribute", "const char *", "const char *", "const char *", "const char *", "const char *", "bool", "bool") static int attribute_default(pcmk__output_t *out, va_list args) { const char *scope = va_arg(args, const char *); const char *instance = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); const char *host = va_arg(args, const char *); bool quiet = va_arg(args, int); bool legacy = va_arg(args, int); gchar *value_esc = NULL; GString *s = NULL; if (quiet) { if (value != NULL) { /* Quiet needs to be turned off for ->info() to do anything */ bool was_quiet = out->is_quiet(out); if (was_quiet) { out->quiet = 
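/* KV_PAIR() above controls quoting for the one-line attribute summary: legacy
 * mode emits bare values, otherwise values are wrapped in double quotes.
 * With hypothetical inputs name "foo", value "bar" and host "node-1", the two
 * modes would produce roughly:
 *
 *     legacy:  scope=status name=foo host=node-1 value=bar
 *     modern:  scope="status" name="foo" host="node-1" value="bar"
 */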
false; } out->info(out, "%s", value); out->quiet = was_quiet; } return pcmk_rc_ok; } s = g_string_sized_new(50); if (pcmk__xml_needs_escape(value, pcmk__xml_escape_attr_pretty)) { value_esc = pcmk__xml_escape(value, pcmk__xml_escape_attr_pretty); value = value_esc; } if (!pcmk__str_empty(scope)) { KV_PAIR(PCMK_XA_SCOPE, scope); } if (!pcmk__str_empty(instance)) { KV_PAIR(PCMK_XA_ID, instance); } KV_PAIR(PCMK_XA_NAME, name); if (!pcmk__str_empty(host)) { KV_PAIR(PCMK_XA_HOST, host); } if (legacy) { pcmk__g_strcat(s, PCMK_XA_VALUE "=", pcmk__s(value, "(null)"), NULL); } else { pcmk__g_strcat(s, PCMK_XA_VALUE "=\"", pcmk__s(value, ""), "\"", NULL); } out->info(out, "%s", s->str); g_free(value_esc); g_string_free(s, TRUE); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute", "const char *", "const char *", "const char *", "const char *", "const char *", "bool", "bool") static int attribute_xml(pcmk__output_t *out, va_list args) { const char *scope = va_arg(args, const char *); const char *instance = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); const char *host = va_arg(args, const char *); bool quiet G_GNUC_UNUSED = va_arg(args, int); bool legacy G_GNUC_UNUSED = va_arg(args, int); xmlNodePtr node = NULL; node = pcmk__output_create_xml_node(out, PCMK_XE_ATTRIBUTE, PCMK_XA_NAME, name, PCMK_XA_VALUE, pcmk__s(value, ""), NULL); if (!pcmk__str_empty(scope)) { pcmk__xe_set(node, PCMK_XA_SCOPE, scope); } if (!pcmk__str_empty(instance)) { pcmk__xe_set(node, PCMK_XA_ID, instance); } if (!pcmk__str_empty(host)) { pcmk__xe_set(node, PCMK_XA_HOST, host); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("rule-check", "const char *", "int", "const char *") static int rule_check_default(pcmk__output_t *out, va_list args) { const char *rule_id = va_arg(args, const char *); int result = va_arg(args, int); const char *error = va_arg(args, const char *); switch (result) { case pcmk_rc_within_range: return out->info(out, "Rule %s is still in effect", rule_id); case pcmk_rc_ok: return out->info(out, "Rule %s satisfies conditions", rule_id); case pcmk_rc_after_range: return out->info(out, "Rule %s is expired", rule_id); case pcmk_rc_before_range: return out->info(out, "Rule %s has not yet taken effect", rule_id); case pcmk_rc_op_unsatisfied: return out->info(out, "Rule %s does not satisfy conditions", rule_id); default: out->err(out, "Could not determine whether rule %s is in effect: %s", rule_id, ((error != NULL)? error : "unexpected error")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("rule-check", "const char *", "int", "const char *") static int rule_check_xml(pcmk__output_t *out, va_list args) { const char *rule_id = va_arg(args, const char *); int result = va_arg(args, int); const char *error = va_arg(args, const char *); char *rc_str = pcmk__itoa(pcmk_rc2exitc(result)); pcmk__output_create_xml_node(out, PCMK_XE_RULE_CHECK, PCMK_XA_RULE_ID, rule_id, PCMK_XA_RC, rc_str, NULL); free(rc_str); switch (result) { case pcmk_rc_within_range: case pcmk_rc_ok: case pcmk_rc_after_range: case pcmk_rc_before_range: case pcmk_rc_op_unsatisfied: return pcmk_rc_ok; default: out->err(out, "Could not determine whether rule %s is in effect: %s", rule_id, ((error != NULL)? 
error : "unexpected error")); return pcmk_rc_ok; } } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_none(pcmk__output_t *out, va_list args) { return pcmk_rc_no_output; } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_text(pcmk__output_t *out, va_list args) { int code = va_arg(args, int); const char *name = va_arg(args, const char *); const char *desc = va_arg(args, const char *); static int code_width = 0; if (out->is_quiet(out)) { /* If out->is_quiet(), don't print the code. Print name and/or desc in a * compact format for text output, or print nothing at all for none-type * output. */ if ((name != NULL) && (desc != NULL)) { pcmk__formatted_printf(out, "%s - %s\n", name, desc); } else if ((name != NULL) || (desc != NULL)) { pcmk__formatted_printf(out, "%s\n", ((name != NULL)? name : desc)); } return pcmk_rc_ok; } /* Get length of longest (most negative) standard Pacemaker return code * This should be longer than all the values of any other type of return * code. */ if (code_width == 0) { long long most_negative = pcmk_rc_error - (long long) pcmk__n_rc + 1; code_width = snprintf(NULL, 0, "%lld", most_negative); pcmk__assert(code_width >= 0); } if ((name != NULL) && (desc != NULL)) { static int name_width = 0; if (name_width == 0) { // Get length of longest standard Pacemaker return code name for (int lpc = 0; lpc < pcmk__n_rc; lpc++) { int len = (int) strlen(pcmk_rc_name(pcmk_rc_error - lpc)); name_width = QB_MAX(name_width, len); } } return out->info(out, "% *d: %-*s %s", code_width, code, name_width, name, desc); } if ((name != NULL) || (desc != NULL)) { return out->info(out, "% *d: %s", code_width, code, ((name != NULL)? name : desc)); } return out->info(out, "% *d", code_width, code); } PCMK__OUTPUT_ARGS("result-code", "int", "const char *", "const char *") static int result_code_xml(pcmk__output_t *out, va_list args) { int code = va_arg(args, int); const char *name = va_arg(args, const char *); const char *desc = va_arg(args, const char *); char *code_str = pcmk__itoa(code); pcmk__output_create_xml_node(out, PCMK_XE_RESULT_CODE, PCMK_XA_CODE, code_str, PCMK_XA_NAME, name, PCMK_XA_DESCRIPTION, desc, NULL); free(code_str); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket-attribute", "const char *", "const char *", "const char *") static int ticket_attribute_default(pcmk__output_t *out, va_list args) { const char *ticket_id G_GNUC_UNUSED = va_arg(args, const char *); const char *name G_GNUC_UNUSED = va_arg(args, const char *); const char *value = va_arg(args, const char *); out->info(out, "%s", value); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket-attribute", "const char *", "const char *", "const char *") static int ticket_attribute_xml(pcmk__output_t *out, va_list args) { const char *ticket_id = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); /* Create: * * * * * */ pcmk__output_xml_create_parent(out, PCMK_XE_TICKETS, NULL); pcmk__output_xml_create_parent(out, PCMK_XE_TICKET, PCMK_XA_ID, ticket_id, NULL); pcmk__output_create_xml_node(out, PCMK_XA_ATTRIBUTE, PCMK_XA_NAME, name, PCMK_XA_VALUE, value, NULL); pcmk__output_xml_pop_parent(out); pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket-constraints", "xmlNode *") static int ticket_constraints_default(pcmk__output_t *out, va_list args) { xmlNode *constraint_xml = va_arg(args, xmlNode *); /* constraint_xml can take two forms: * * * * for 
when there's only one ticket in the CIB, or when the user asked * for a specific ticket (crm_ticket -c -t for instance) * * * * * * * for when there's multiple tickets in the and the user did not ask for * a specific one. * * In both cases, we simply output a element for each ticket * in the results. */ out->info(out, "Constraints XML:\n"); if (pcmk__xe_is(constraint_xml, PCMK__XE_XPATH_QUERY)) { xmlNode *child = pcmk__xe_first_child(constraint_xml, NULL, NULL, NULL); do { GString *buf = g_string_sized_new(1024); pcmk__xml_string(child, pcmk__xml_fmt_pretty, buf, 0); out->output_xml(out, PCMK_XE_CONSTRAINT, buf->str); g_string_free(buf, TRUE); child = pcmk__xe_next(child, NULL); } while (child != NULL); } else { GString *buf = g_string_sized_new(1024); pcmk__xml_string(constraint_xml, pcmk__xml_fmt_pretty, buf, 0); out->output_xml(out, PCMK_XE_CONSTRAINT, buf->str); g_string_free(buf, TRUE); } return pcmk_rc_ok; } static int add_ticket_element_with_constraints(xmlNode *node, void *userdata) { pcmk__output_t *out = (pcmk__output_t *) userdata; const char *ticket_id = pcmk__xe_get(node, PCMK_XA_TICKET); pcmk__output_xml_create_parent(out, PCMK_XE_TICKET, PCMK_XA_ID, ticket_id, NULL); pcmk__output_xml_create_parent(out, PCMK_XE_CONSTRAINTS, NULL); pcmk__output_xml_add_node_copy(out, node); /* Pop two parents so now we are back under the element */ pcmk__output_xml_pop_parent(out); pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } static int add_resource_element(xmlNode *node, void *userdata) { pcmk__output_t *out = (pcmk__output_t *) userdata; const char *rsc = pcmk__xe_get(node, PCMK_XA_RSC); pcmk__output_create_xml_node(out, PCMK_XE_RESOURCE, PCMK_XA_ID, rsc, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket-constraints", "xmlNode *") static int ticket_constraints_xml(pcmk__output_t *out, va_list args) { xmlNode *constraint_xml = va_arg(args, xmlNode *); /* Create: * * * * * * * ... * */ pcmk__output_xml_create_parent(out, PCMK_XE_TICKETS, NULL); if (pcmk__xe_is(constraint_xml, PCMK__XE_XPATH_QUERY)) { /* Iterate through the list of children once to create all the * ticket/constraint elements. */ pcmk__xe_foreach_child(constraint_xml, NULL, add_ticket_element_with_constraints, out); /* Put us back at the same level as where was created. */ pcmk__output_xml_pop_parent(out); /* Constraints can reference a resource ID that is defined in the XML * schema as an IDREF. This requires some other element to be present * with an id= attribute that matches. * * Iterate through the list of children a second time to create the * following: * * * * ... * */ pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL); pcmk__xe_foreach_child(constraint_xml, NULL, add_resource_element, out); pcmk__output_xml_pop_parent(out); } else { /* Creating the output for a single constraint is much easier. All the * comments in the above block apply here. 
*/ add_ticket_element_with_constraints(constraint_xml, out); pcmk__output_xml_pop_parent(out); pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL); add_resource_element(constraint_xml, out); pcmk__output_xml_pop_parent(out); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket-state", "xmlNode *") static int ticket_state_default(pcmk__output_t *out, va_list args) { xmlNode *state_xml = va_arg(args, xmlNode *); GString *buf = g_string_sized_new(1024); out->info(out, "State XML:\n"); pcmk__xml_string(state_xml, pcmk__xml_fmt_pretty, buf, 0); out->output_xml(out, PCMK__XE_TICKET_STATE, buf->str); g_string_free(buf, TRUE); return pcmk_rc_ok; } static int add_ticket_element(xmlNode *node, void *userdata) { pcmk__output_t *out = (pcmk__output_t *) userdata; xmlNode *ticket_node = NULL; ticket_node = pcmk__output_create_xml_node(out, PCMK_XE_TICKET, NULL); pcmk__xe_copy_attrs(ticket_node, node, pcmk__xaf_none); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket-state", "xmlNode *") static int ticket_state_xml(pcmk__output_t *out, va_list args) { xmlNode *state_xml = va_arg(args, xmlNode *); /* Create: * * * ... * */ pcmk__output_xml_create_parent(out, PCMK_XE_TICKETS, NULL); if (state_xml->children != NULL) { /* Iterate through the list of children once to create all the * ticket elements. */ pcmk__xe_foreach_child(state_xml, PCMK__XE_TICKET_STATE, add_ticket_element, out); } else { add_ticket_element(state_xml, out); } pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } static pcmk__message_entry_t fmt_functions[] = { { "attribute", "default", attribute_default }, { "attribute", "xml", attribute_xml }, { "cluster-status", "default", pcmk__cluster_status_text }, { "cluster-status", "html", cluster_status_html }, { "cluster-status", "xml", cluster_status_xml }, { "crmadmin-node", "default", crmadmin_node }, { "crmadmin-node", "text", crmadmin_node_text }, { "crmadmin-node", "xml", crmadmin_node_xml }, { "dc", "default", dc }, { "dc", "text", dc_text }, { "dc", "xml", dc_xml }, { "digests", "default", digests_text }, { "digests", "xml", digests_xml }, { "health", "default", health }, { "health", "text", health_text }, { "health", "xml", health_xml }, { "inject-attr", "default", inject_attr }, { "inject-attr", "xml", inject_attr_xml }, { "inject-cluster-action", "default", inject_cluster_action }, { "inject-cluster-action", "xml", inject_cluster_action_xml }, { "inject-fencing-action", "default", inject_fencing_action }, { "inject-fencing-action", "xml", inject_fencing_action_xml }, { "inject-modify-config", "default", inject_modify_config }, { "inject-modify-config", "xml", inject_modify_config_xml }, { "inject-modify-node", "default", inject_modify_node }, { "inject-modify-node", "xml", inject_modify_node_xml }, { "inject-modify-ticket", "default", inject_modify_ticket }, { "inject-modify-ticket", "xml", inject_modify_ticket_xml }, { "inject-pseudo-action", "default", inject_pseudo_action }, { "inject-pseudo-action", "xml", inject_pseudo_action_xml }, { "inject-rsc-action", "default", inject_rsc_action }, { "inject-rsc-action", "xml", inject_rsc_action_xml }, { "inject-spec", "default", inject_spec }, { "inject-spec", "xml", inject_spec_xml }, { "locations-and-colocations", "default", locations_and_colocations }, { "locations-and-colocations", "xml", locations_and_colocations_xml }, { "locations-list", "default", locations_list }, { "locations-list", "xml", locations_list_xml }, { "node-action", "default", node_action }, { "node-action", "xml", node_action_xml }, { "node-info", "default", 
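/* Message names are bound to per-format implementations through this table of
 * pcmk__message_entry_t entries, which is terminated by a { NULL, NULL, NULL }
 * sentinel and handed to pcmk__register_messages() in
 * pcmk__register_lib_messages() below; the "default" entries appear to cover
 * any format without a more specific handler.  A hypothetical addition would
 * look like:
 *
 *     { "inject-example", "default", inject_example },
 *     { "inject-example", "xml", inject_example_xml },
 */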
node_info_default }, { "node-info", "xml", node_info_xml }, { "pacemakerd-health", "default", pacemakerd_health }, { "pacemakerd-health", "html", pacemakerd_health_html }, { "pacemakerd-health", "text", pacemakerd_health_text }, { "pacemakerd-health", "xml", pacemakerd_health_xml }, { "profile", "default", profile_default, }, { "profile", "xml", profile_xml }, { "result-code", PCMK_VALUE_NONE, result_code_none }, { "result-code", "text", result_code_text }, { "result-code", "xml", result_code_xml }, { "rsc-action", "default", rsc_action_default }, { "rsc-action-item", "default", rsc_action_item }, { "rsc-action-item", "xml", rsc_action_item_xml }, { "rsc-is-colocated-with-list", "default", rsc_is_colocated_with_list }, { "rsc-is-colocated-with-list", "xml", rsc_is_colocated_with_list_xml }, { "rscs-colocated-with-list", "default", rscs_colocated_with_list }, { "rscs-colocated-with-list", "xml", rscs_colocated_with_list_xml }, { "rule-check", "default", rule_check_default }, { "rule-check", "xml", rule_check_xml }, { "ticket-attribute", "default", ticket_attribute_default }, { "ticket-attribute", "xml", ticket_attribute_xml }, { "ticket-constraints", "default", ticket_constraints_default }, { "ticket-constraints", "xml", ticket_constraints_xml }, { "ticket-state", "default", ticket_state_default }, { "ticket-state", "xml", ticket_state_xml }, { NULL, NULL, NULL } }; void pcmk__register_lib_messages(pcmk__output_t *out) { pcmk__register_messages(out, fmt_functions); } diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c index ac29dd444c..cd80f493ae 100644 --- a/lib/pacemaker/pcmk_sched_constraints.c +++ b/lib/pacemaker/pcmk_sched_constraints.c @@ -1,442 +1,442 @@ /* * Copyright 2004-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU General Public License version 2 * or later (GPLv2+) WITHOUT ANY WARRANTY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "libpacemaker_private.h" /*! * \internal * \brief Unpack constraints from XML * * Given scheduler data, unpack all constraints from its input XML into * data structures. 
* * \param[in,out] scheduler Scheduler data */ void pcmk__unpack_constraints(pcmk_scheduler_t *scheduler) { xmlNode *xml_constraints = pcmk_find_cib_element(scheduler->input, PCMK_XE_CONSTRAINTS); for (xmlNode *xml_obj = pcmk__xe_first_child(xml_constraints, NULL, NULL, NULL); xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj, NULL)) { const char *id = pcmk__xe_get(xml_obj, PCMK_XA_ID); const char *tag = (const char *) xml_obj->name; if (id == NULL) { pcmk__config_err("Ignoring <%s> constraint without " PCMK_XA_ID, tag); continue; } crm_trace("Unpacking %s constraint '%s'", tag, id); if (pcmk__str_eq(PCMK_XE_RSC_ORDER, tag, pcmk__str_none)) { pcmk__unpack_ordering(xml_obj, scheduler); } else if (pcmk__str_eq(PCMK_XE_RSC_COLOCATION, tag, pcmk__str_none)) { pcmk__unpack_colocation(xml_obj, scheduler); } else if (pcmk__str_eq(PCMK_XE_RSC_LOCATION, tag, pcmk__str_none)) { pcmk__unpack_location(xml_obj, scheduler); } else if (pcmk__str_eq(PCMK_XE_RSC_TICKET, tag, pcmk__str_none)) { pcmk__unpack_rsc_ticket(xml_obj, scheduler); } else { pcmk__config_err("Unsupported constraint type: %s", tag); } } } pcmk_resource_t * pcmk__find_constraint_resource(GList *rsc_list, const char *id) { if (id == NULL) { return NULL; } for (GList *iter = rsc_list; iter != NULL; iter = iter->next) { pcmk_resource_t *parent = iter->data; pcmk_resource_t *match = NULL; match = parent->priv->fns->find_rsc(parent, id, NULL, pcmk_rsc_match_history); if (match != NULL) { if (!pcmk__str_eq(match->id, id, pcmk__str_none)) { /* We found an instance of a clone instead */ match = uber_parent(match); crm_debug("Found %s for %s", match->id, id); } return match; } } crm_trace("No match for %s", id); return NULL; } /*! * \internal * \brief Check whether an ID references a resource tag * * \param[in] scheduler Scheduler data * \param[in] id Tag ID to search for * \param[out] tag Where to store tag, if found * * \return true if ID refers to a tagged resource or resource set template, * otherwise false */ static bool find_constraint_tag(const pcmk_scheduler_t *scheduler, const char *id, pcmk__idref_t **tag) { *tag = NULL; // Check whether id refers to a resource set template if (g_hash_table_lookup_extended(scheduler->priv->templates, id, NULL, (gpointer *) tag)) { if (*tag == NULL) { crm_notice("No resource is derived from template '%s'", id); return false; } return true; } // If not, check whether id refers to a tag if (g_hash_table_lookup_extended(scheduler->priv->tags, id, NULL, (gpointer *) tag)) { if (*tag == NULL) { crm_notice("No resource is tagged with '%s'", id); return false; } return true; } pcmk__config_warn("No resource, template, or tag named '%s'", id); return false; } /*! * \internal * \brief Parse a role attribute from a constraint * * This is like pcmk_parse_role() except that started is treated as * pcmk_role_unknown (indicating any role), and the return value is * pcmk_rc_unpack_error for invalid specifications. * * \param[in] id ID of constraint being parsed (for logging only) * \param[in] role_spec Role specification * \param[in] role Where to store parsed role * * \return Standard Pacemaker return code */ int pcmk__parse_constraint_role(const char *id, const char *role_spec, enum rsc_role_e *role) { *role = pcmk_parse_role(role_spec); switch (*role) { case pcmk_role_unknown: if (role_spec != NULL) { pcmk__config_err("Ignoring constraint %s: Invalid role '%s'", id, role_spec); return pcmk_rc_unpack_error; } break; case pcmk_role_started: *role = pcmk_role_unknown; break; default: break; } return pcmk_rc_ok; } /*! 
* \brief * \internal Check whether an ID refers to a valid resource or tag * * \param[in] scheduler Scheduler data * \param[in] id ID to search for * \param[out] rsc Where to store resource, if found * (or NULL to skip searching resources) * \param[out] tag Where to store tag, if found * (or NULL to skip searching tags) * * \return true if id refers to a resource (possibly indirectly via a tag) */ bool pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler, const char *id, pcmk_resource_t **rsc, pcmk__idref_t **tag) { if (rsc != NULL) { *rsc = pcmk__find_constraint_resource(scheduler->priv->resources, id); if (*rsc != NULL) { return true; } } if ((tag != NULL) && find_constraint_tag(scheduler, id, tag)) { return true; } return false; } /*! * \internal * \brief Replace any resource tags with equivalent \C PCMK_XE_RESOURCE_REF * entries * * If a given constraint has resource sets, check each set for * \c PCMK_XE_RESOURCE_REF entries that list tags rather than resource IDs, and * replace any found with \c PCMK_XE_RESOURCE_REF entries for the corresponding * resource IDs. * * \param[in,out] xml_obj Constraint XML * \param[in] scheduler Scheduler data * * \return Equivalent XML with resource tags replaced (or NULL if none) * \note It is the caller's responsibility to free the return value with * \c pcmk__xml_free(). */ xmlNode * pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pcmk_scheduler_t *scheduler) { xmlNode *new_xml = NULL; bool any_refs = false; // Short-circuit if there are no sets if (pcmk__xe_first_child(xml_obj, PCMK_XE_RESOURCE_SET, NULL, NULL) == NULL) { return NULL; } new_xml = pcmk__xml_copy(NULL, xml_obj); for (xmlNode *set = pcmk__xe_first_child(new_xml, PCMK_XE_RESOURCE_SET, NULL, NULL); set != NULL; set = pcmk__xe_next(set, PCMK_XE_RESOURCE_SET)) { GList *tag_refs = NULL; GList *iter = NULL; for (xmlNode *xml_rsc = pcmk__xe_first_child(set, PCMK_XE_RESOURCE_REF, NULL, NULL); xml_rsc != NULL; xml_rsc = pcmk__xe_next(xml_rsc, PCMK_XE_RESOURCE_REF)) { pcmk_resource_t *rsc = NULL; pcmk__idref_t *tag = NULL; if (!pcmk__valid_resource_or_tag(scheduler, pcmk__xe_id(xml_rsc), &rsc, &tag)) { pcmk__config_err("Ignoring resource sets for constraint '%s' " "because '%s' is not a valid resource or tag", pcmk__xe_id(xml_obj), pcmk__xe_id(xml_rsc)); pcmk__xml_free(new_xml); return NULL; } else if (rsc) { continue; } else if (tag) { /* PCMK_XE_RESOURCE_REF under PCMK_XE_RESOURCE_SET references * template or tag */ xmlNode *last_ref = xml_rsc; /* For example, given the original XML: * * * * * * * * If rsc2 and rsc3 are tagged with tag1, we add them after it: * * * * * * * * */ for (iter = tag->refs; iter != NULL; iter = iter->next) { const char *ref_id = iter->data; xmlNode *new_ref = pcmk__xe_create(set, PCMK_XE_RESOURCE_REF); pcmk__xe_set(new_ref, PCMK_XA_ID, ref_id); xmlAddNextSibling(last_ref, new_ref); last_ref = new_ref; } any_refs = true; /* Freeing the resource_ref now would break the XML child * iteration, so just remember it for freeing later. */ tag_refs = g_list_append(tag_refs, xml_rsc); } } /* Now free '', and finally get: */ for (iter = tag_refs; iter != NULL; iter = iter->next) { xmlNode *tag_ref = iter->data; pcmk__xml_free(tag_ref); } g_list_free(tag_refs); } if (!any_refs) { pcmk__xml_free(new_xml); new_xml = NULL; } return new_xml; } /*! 
* \internal * \brief Convert a tag into a resource set of tagged resources * * \param[in,out] xml_obj Constraint XML * \param[out] rsc_set Where to store resource set XML * \param[in] attr Name of XML attribute with resource or tag ID * \param[in] convert_rsc If true, convert to set even if \p attr * references a resource * \param[in] scheduler Scheduler data */ bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr, bool convert_rsc, const pcmk_scheduler_t *scheduler) { const char *cons_id = NULL; const char *id = NULL; pcmk_resource_t *rsc = NULL; pcmk__idref_t *tag = NULL; *rsc_set = NULL; CRM_CHECK((xml_obj != NULL) && (attr != NULL), return false); cons_id = pcmk__xe_id(xml_obj); if (cons_id == NULL) { pcmk__config_err("Ignoring <%s> constraint without " PCMK_XA_ID, xml_obj->name); return false; } id = pcmk__xe_get(xml_obj, attr); if (id == NULL) { return true; } if (!pcmk__valid_resource_or_tag(scheduler, id, &rsc, &tag)) { pcmk__config_err("Ignoring constraint '%s' because '%s' is not a " "valid resource or tag", cons_id, id); return false; } else if (tag) { /* The "attr" attribute (for a resource in a constraint) specifies a * template or tag. Add the corresponding PCMK_XE_RESOURCE_SET * containing the resources derived from or tagged with it. */ *rsc_set = pcmk__xe_create(xml_obj, PCMK_XE_RESOURCE_SET); pcmk__xe_set(*rsc_set, PCMK_XA_ID, id); for (GList *iter = tag->refs; iter != NULL; iter = iter->next) { const char *obj_ref = iter->data; xmlNode *rsc_ref = NULL; rsc_ref = pcmk__xe_create(*rsc_set, PCMK_XE_RESOURCE_REF); pcmk__xe_set(rsc_ref, PCMK_XA_ID, obj_ref); } // Set PCMK_XA_SEQUENTIAL=PCMK_VALUE_FALSE for the PCMK_XE_RESOURCE_SET - pcmk__xe_set_bool_attr(*rsc_set, PCMK_XA_SEQUENTIAL, false); + pcmk__xe_set_bool(*rsc_set, PCMK_XA_SEQUENTIAL, false); } else if ((rsc != NULL) && convert_rsc) { /* Even if a regular resource is referenced by "attr", convert it into a * PCMK_XE_RESOURCE_SET, because the other resource reference in the * constraint could be a template or tag. */ xmlNode *rsc_ref = NULL; *rsc_set = pcmk__xe_create(xml_obj, PCMK_XE_RESOURCE_SET); pcmk__xe_set(*rsc_set, PCMK_XA_ID, id); rsc_ref = pcmk__xe_create(*rsc_set, PCMK_XE_RESOURCE_REF); pcmk__xe_set(rsc_ref, PCMK_XA_ID, id); } else { return true; } /* Remove the "attr" attribute referencing the template/tag */ if (*rsc_set != NULL) { pcmk__xe_remove_attr(xml_obj, attr); } return true; } /*! * \internal * \brief Create constraints inherent to resource types * * \param[in,out] scheduler Scheduler data */ void pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler) { crm_trace("Create internal constraints"); for (GList *iter = scheduler->priv->resources; iter != NULL; iter = iter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data; rsc->priv->cmds->internal_constraints(rsc); } } diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c index 11f677aaba..30387c8581 100644 --- a/lib/pengine/pe_output.c +++ b/lib/pengine/pe_output.c @@ -1,3517 +1,3517 @@ /* * Copyright 2019-2025 the Pacemaker project contributors * * The version control history for this file may have further details. * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
*/ #include #include #include // g_strchomp() #include // xmlNode #include #include #include #include #include #include const char * pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts) { const char * desc = NULL; // User-supplied description if (pcmk__any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)) { desc = pcmk__xe_get(rsc->priv->xml, PCMK_XA_DESCRIPTION); } return desc; } /* Never display node attributes whose name starts with one of these prefixes */ #define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \ PCMK__NODE_ATTR_SHUTDOWN, PCMK_NODE_ATTR_TERMINATE, \ PCMK_NODE_ATTR_STANDBY, "#", NULL } static int compare_attribute(gconstpointer a, gconstpointer b) { int rc; rc = strcmp((const char *)a, (const char *)b); return rc; } /*! * \internal * \brief Determine whether extended information about an attribute should be added. * * \param[in] node Node that ran this resource * \param[in,out] rsc_list List of resources for this node * \param[in,out] scheduler Scheduler data * \param[in] attrname Attribute to find * \param[out] expected_score Expected value for this attribute * * \return true if extended information should be printed, false otherwise * \note Currently, extended information is only supported for ping/pingd * resources, for which a message will be printed if connectivity is lost * or degraded. */ static bool add_extra_info(const pcmk_node_t *node, GList *rsc_list, pcmk_scheduler_t *scheduler, const char *attrname, int *expected_score) { GList *gIter = NULL; for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data; const char *type = g_hash_table_lookup(rsc->priv->meta, PCMK_XA_TYPE); const char *name = NULL; GHashTable *params = NULL; if (rsc->priv->children != NULL) { if (add_extra_info(node, rsc->priv->children, scheduler, attrname, expected_score)) { return true; } } if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) { continue; } params = pe_rsc_params(rsc, node, scheduler); name = g_hash_table_lookup(params, PCMK_XA_NAME); if (name == NULL) { name = "pingd"; } /* To identify the resource with the attribute name. */ if (pcmk__str_eq(name, attrname, pcmk__str_casei)) { int host_list_num = 0; const char *hosts = g_hash_table_lookup(params, "host_list"); const char *multiplier = g_hash_table_lookup(params, "multiplier"); int multiplier_i; if (hosts) { char **host_list = g_strsplit(hosts, " ", 0); host_list_num = g_strv_length(host_list); g_strfreev(host_list); } if ((multiplier == NULL) || (pcmk__scan_min_int(multiplier, &multiplier_i, INT_MIN) != pcmk_rc_ok)) { /* The ocf:pacemaker:ping resource agent defaults multiplier to * 1. The agent currently does not handle invalid text, but it * should, and this would be a reasonable choice ... 
*/ multiplier_i = 1; } *expected_score = host_list_num * multiplier_i; return true; } } return false; } static GList * filter_attr_list(GList *attr_list, char *name) { int i; const char *filt_str[] = FILTER_STR; CRM_CHECK(name != NULL, return attr_list); /* filtering automatic attributes */ for (i = 0; filt_str[i] != NULL; i++) { if (g_str_has_prefix(name, filt_str[i])) { return attr_list; } } return g_list_insert_sorted(attr_list, name, compare_attribute); } static GList * get_operation_list(xmlNode *rsc_entry) { GList *op_list = NULL; xmlNode *rsc_op = NULL; for (rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP, NULL, NULL); rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op, PCMK__XE_LRM_RSC_OP)) { const char *task = pcmk__xe_get(rsc_op, PCMK_XA_OPERATION); if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) { continue; // Ignore notify actions } else { int exit_status; pcmk__scan_min_int(pcmk__xe_get(rsc_op, PCMK__XA_RC_CODE), &exit_status, 0); if ((exit_status == CRM_EX_NOT_RUNNING) && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none) && pcmk__str_eq(pcmk__xe_get(rsc_op, PCMK_META_INTERVAL), "0", pcmk__str_null_matches)) { continue; // Ignore probes that found the resource not running } } op_list = g_list_append(op_list, rsc_op); } op_list = g_list_sort(op_list, sort_op_by_callid); return op_list; } static void add_dump_node(gpointer key, gpointer value, gpointer user_data) { xmlNodePtr node = user_data; node = pcmk__xe_create(node, (const char *) key); pcmk__xe_set_content(node, "%s", (const char *) value); } static void append_dump_text(gpointer key, gpointer value, gpointer user_data) { char **dump_text = user_data; char *new_text = pcmk__assert_asprintf("%s %s=%s", *dump_text, (const char *) key, (const char *)value); free(*dump_text); *dump_text = new_text; } #define XPATH_STACK "//" PCMK_XE_NVPAIR \ "[@" PCMK_XA_NAME "='" \ PCMK_OPT_CLUSTER_INFRASTRUCTURE "']" static const char * get_cluster_stack(pcmk_scheduler_t *scheduler) { xmlNode *stack = pcmk__xpath_find_one(scheduler->input->doc, XPATH_STACK, LOG_DEBUG); if (stack != NULL) { return pcmk__xe_get(stack, PCMK_XA_VALUE); } return PCMK_VALUE_UNKNOWN; } static char * last_changed_string(const char *last_written, const char *user, const char *client, const char *origin) { if (last_written != NULL || user != NULL || client != NULL || origin != NULL) { return pcmk__assert_asprintf("%s%s%s%s%s%s%s", pcmk__s(last_written, ""), ((user != NULL)? " by " : ""), pcmk__s(user, ""), ((client != NULL) ? " via " : ""), pcmk__s(client, ""), ((origin != NULL)? 
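/* get_operation_list() above drops two kinds of history entries before
 * sorting by call ID: notify actions, and zero-interval monitor results that
 * returned CRM_EX_NOT_RUNNING (probes that merely confirmed the resource was
 * not running).  For a hypothetical history of probe (not running), start,
 * recurring monitor, notify, only the start and the recurring monitor
 * survive.  A call sketch with a hypothetical lrm_resource element:
 *
 *     GList *op_list = get_operation_list(lrm_resource_xml);
 */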
" on " : ""), pcmk__s(origin, "")); } else { return strdup(""); } } static char * op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s, int rc, bool print_timing) { const char *call = pcmk__xe_get(xml_op, PCMK__XA_CALL_ID); char *interval_str = NULL; char *buf = NULL; if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) { char *pair = pcmk__format_nvpair(PCMK_XA_INTERVAL, interval_ms_s, "ms"); interval_str = pcmk__assert_asprintf(" %s", pair); free(pair); } if (print_timing) { char *last_change_str = NULL; char *exec_str = NULL; char *queue_str = NULL; const char *value = NULL; time_t epoch = 0; pcmk__xe_get_time(xml_op, PCMK_XA_LAST_RC_CHANGE, &epoch); if (epoch > 0) { char *epoch_str = pcmk__epoch2str(&epoch, 0); last_change_str = pcmk__assert_asprintf(" %s=\"%s\"", PCMK_XA_LAST_RC_CHANGE, pcmk__s(epoch_str, "")); free(epoch_str); } value = pcmk__xe_get(xml_op, PCMK_XA_EXEC_TIME); if (value) { char *pair = pcmk__format_nvpair(PCMK_XA_EXEC_TIME, value, "ms"); exec_str = pcmk__assert_asprintf(" %s", pair); free(pair); } value = pcmk__xe_get(xml_op, PCMK_XA_QUEUE_TIME); if (value) { char *pair = pcmk__format_nvpair(PCMK_XA_QUEUE_TIME, value, "ms"); queue_str = pcmk__assert_asprintf(" %s", pair); free(pair); } buf = pcmk__assert_asprintf("(%s) %s:%s%s%s%s rc=%d (%s)", call, task, pcmk__s(interval_str, ""), pcmk__s(last_change_str, ""), pcmk__s(exec_str, ""), pcmk__s(queue_str, ""), rc, crm_exit_str(rc)); if (last_change_str) { free(last_change_str); } if (exec_str) { free(exec_str); } if (queue_str) { free(queue_str); } } else { buf = pcmk__assert_asprintf("(%s) %s%s%s", call, task, ((interval_str != NULL)? ":" : ""), pcmk__s(interval_str, "")); } if (interval_str) { free(interval_str); } return buf; } static char * resource_history_string(pcmk_resource_t *rsc, const char *rsc_id, bool all, int failcount, time_t last_failure) { char *buf = NULL; if (rsc == NULL) { /* @COMPAT "orphan" is deprecated since 3.0.2. Replace with "removed" at * a compatibility break. */ buf = pcmk__assert_asprintf("%s: orphan", rsc_id); } else if (all || failcount || last_failure > 0) { char *failcount_s = NULL; char *lastfail_s = NULL; if (failcount > 0) { failcount_s = pcmk__assert_asprintf(" " PCMK_XA_FAIL_COUNT "=%d", failcount); } else { failcount_s = strdup(""); } if (last_failure > 0) { buf = pcmk__epoch2str(&last_failure, 0); lastfail_s = pcmk__assert_asprintf(" " PCMK_XA_LAST_FAILURE "='%s'", buf); free(buf); } buf = pcmk__assert_asprintf("%s: " PCMK_META_MIGRATION_THRESHOLD "=%d%s%s", rsc_id, rsc->priv->ban_after_failures, failcount_s, pcmk__s(lastfail_s, "")); free(failcount_s); free(lastfail_s); } else { buf = pcmk__assert_asprintf("%s:", rsc_id); } return buf; } /*! * \internal * \brief Get a node's feature set for status display purposes * * \param[in] node Node to check * * \return String representation of feature set if the node is fully up (using * "<3.15.1" for older nodes that don't set the #feature-set attribute), * otherwise NULL */ static const char * get_node_feature_set(const pcmk_node_t *node) { if (node->details->online && pcmk__is_set(node->priv->flags, pcmk__node_expected_up) && !pcmk__is_pacemaker_remote_node(node)) { const char *feature_set = g_hash_table_lookup(node->priv->attrs, CRM_ATTR_FEATURE_SET); /* The feature set attribute is present since 3.15.1. If it is missing, * then the node must be running an earlier version. 
*/ return pcmk__s(feature_set, "<3.15.1"); } return NULL; } static bool is_mixed_version(pcmk_scheduler_t *scheduler) { const char *feature_set = NULL; for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = gIter->data; const char *node_feature_set = get_node_feature_set(node); if (node_feature_set != NULL) { if (feature_set == NULL) { feature_set = node_feature_set; } else if (strcmp(feature_set, node_feature_set) != 0) { return true; } } } return false; } static void formatted_xml_buf(const pcmk_resource_t *rsc, GString *xml_buf, bool raw) { if (raw && (rsc->priv->orig_xml != NULL)) { pcmk__xml_string(rsc->priv->orig_xml, pcmk__xml_fmt_pretty, xml_buf, 0); } else { pcmk__xml_string(rsc->priv->xml, pcmk__xml_fmt_pretty, xml_buf, 0); } } #define XPATH_DC_VERSION "//" PCMK_XE_NVPAIR \ "[@" PCMK_XA_NAME "='" PCMK_OPT_DC_VERSION "']" PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t") static int cluster_summary(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); int rc = pcmk_rc_no_output; const char *stack_s = get_cluster_stack(scheduler); if (pcmk__is_set(section_opts, pcmk_section_stack)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-stack", stack_s, pcmkd_state); } if (pcmk__is_set(section_opts, pcmk_section_dc)) { xmlNode *dc_version = pcmk__xpath_find_one(scheduler->input->doc, XPATH_DC_VERSION, LOG_DEBUG); const char *dc_version_s = dc_version? pcmk__xe_get(dc_version, PCMK_XA_VALUE) : NULL; const char *quorum = pcmk__xe_get(scheduler->input, PCMK_XA_HAVE_QUORUM); char *dc_name = NULL; const bool mixed_version = is_mixed_version(scheduler); if (scheduler->dc_node != NULL) { dc_name = pe__node_display_name(scheduler->dc_node, pcmk__is_set(show_opts, pcmk_show_node_id)); } PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-dc", scheduler->dc_node, quorum, dc_version_s, dc_name, mixed_version); free(dc_name); } if (pcmk__is_set(section_opts, pcmk_section_times)) { const char *last_written = pcmk__xe_get(scheduler->input, PCMK_XA_CIB_LAST_WRITTEN); const char *user = pcmk__xe_get(scheduler->input, PCMK_XA_UPDATE_USER); const char *client = pcmk__xe_get(scheduler->input, PCMK_XA_UPDATE_CLIENT); const char *origin = pcmk__xe_get(scheduler->input, PCMK_XA_UPDATE_ORIGIN); PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-times", scheduler->priv->local_node_name, last_written, user, client, origin); } if (pcmk__is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-counts", g_list_length(scheduler->nodes), scheduler->priv->ninstances, scheduler->priv->disabled_resources, scheduler->priv->blocked_resources); } if (pcmk__is_set(section_opts, pcmk_section_options)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-options", scheduler); } PCMK__OUTPUT_LIST_FOOTER(out, rc); if (pcmk__is_set(section_opts, pcmk_section_maint_mode)) { if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) { rc = pcmk_rc_ok; } } return rc; } PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *", "enum pcmk_pacemakerd_state", "uint32_t", 
"uint32_t") static int cluster_summary_html(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); int rc = pcmk_rc_no_output; const char *stack_s = get_cluster_stack(scheduler); if (pcmk__is_set(section_opts, pcmk_section_stack)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-stack", stack_s, pcmkd_state); } /* Always print DC if none, even if not requested */ if ((scheduler->dc_node == NULL) || pcmk__is_set(section_opts, pcmk_section_dc)) { xmlNode *dc_version = pcmk__xpath_find_one(scheduler->input->doc, XPATH_DC_VERSION, LOG_DEBUG); const char *dc_version_s = dc_version? pcmk__xe_get(dc_version, PCMK_XA_VALUE) : NULL; const char *quorum = pcmk__xe_get(scheduler->input, PCMK_XA_HAVE_QUORUM); char *dc_name = NULL; const bool mixed_version = is_mixed_version(scheduler); if (scheduler->dc_node != NULL) { dc_name = pe__node_display_name(scheduler->dc_node, pcmk__is_set(show_opts, pcmk_show_node_id)); } PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-dc", scheduler->dc_node, quorum, dc_version_s, dc_name, mixed_version); free(dc_name); } if (pcmk__is_set(section_opts, pcmk_section_times)) { const char *last_written = pcmk__xe_get(scheduler->input, PCMK_XA_CIB_LAST_WRITTEN); const char *user = pcmk__xe_get(scheduler->input, PCMK_XA_UPDATE_USER); const char *client = pcmk__xe_get(scheduler->input, PCMK_XA_UPDATE_CLIENT); const char *origin = pcmk__xe_get(scheduler->input, PCMK_XA_UPDATE_ORIGIN); PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-times", scheduler->priv->local_node_name, last_written, user, client, origin); } if (pcmk__is_set(section_opts, pcmk_section_counts)) { PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary"); out->message(out, "cluster-counts", g_list_length(scheduler->nodes), scheduler->priv->ninstances, scheduler->priv->disabled_resources, scheduler->priv->blocked_resources); } if (pcmk__is_set(section_opts, pcmk_section_options)) { /* Kind of a hack - close the list we may have opened earlier in this * function so we can put all the options into their own list. We * only want to do this on HTML output, though. 
*/ PCMK__OUTPUT_LIST_FOOTER(out, rc); out->begin_list(out, NULL, NULL, "Config Options"); out->message(out, "cluster-options", scheduler); } PCMK__OUTPUT_LIST_FOOTER(out, rc); if (pcmk__is_set(section_opts, pcmk_section_maint_mode)) { if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) { rc = pcmk_rc_ok; } } return rc; } char * pe__node_display_name(pcmk_node_t *node, bool print_detail) { char *node_name; const char *node_host = NULL; const char *node_id = NULL; int name_len; pcmk__assert((node != NULL) && (node->priv->name != NULL)); /* Host is displayed only if this is a guest node and detail is requested */ if (print_detail && pcmk__is_guest_or_bundle_node(node)) { const pcmk_resource_t *launcher = NULL; const pcmk_node_t *host_node = NULL; launcher = node->priv->remote->priv->launcher; host_node = pcmk__current_node(launcher); if (host_node && host_node->details) { node_host = host_node->priv->name; } if (node_host == NULL) { node_host = ""; /* so we at least get "uname@" to indicate guest */ } } /* Node ID is displayed if different from uname and detail is requested */ if (print_detail && !pcmk__str_eq(node->priv->name, node->priv->id, pcmk__str_casei)) { node_id = node->priv->id; } /* Determine name length */ name_len = strlen(node->priv->name) + 1; if (node_host) { name_len += strlen(node_host) + 1; /* "@node_host" */ } if (node_id) { name_len += strlen(node_id) + 3; /* + " (node_id)" */ } /* Allocate and populate display name */ node_name = pcmk__assert_alloc(name_len, sizeof(char)); strcpy(node_name, node->priv->name); if (node_host) { strcat(node_name, "@"); strcat(node_name, node_host); } if (node_id) { strcat(node_name, " ("); strcat(node_name, node_id); strcat(node_name, ")"); } return node_name; } int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name, ...) 
{ xmlNodePtr xml_node = NULL; va_list pairs; pcmk__assert(tag_name != NULL); xml_node = pcmk__output_xml_peek_parent(out); pcmk__assert(xml_node != NULL); xml_node = pcmk__xe_create(xml_node, tag_name); va_start(pairs, tag_name); pcmk__xe_set_propv(xml_node, pairs); va_end(pairs); if (is_list) { pcmk__output_xml_push_parent(out, xml_node); } return pcmk_rc_ok; } static const char * role_desc(enum rsc_role_e role) { if (role == pcmk_role_promoted) { return "in " PCMK_ROLE_PROMOTED " role "; } return ""; } PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pcmk__location_t *", "uint32_t") static int ban_html(pcmk__output_t *out, va_list args) { pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *); pcmk__location_t *location = va_arg(args, pcmk__location_t *); uint32_t show_opts = va_arg(args, uint32_t); char *node_name = pe__node_display_name(pe_node, pcmk__is_set(show_opts, pcmk_show_node_id)); char *buf = pcmk__assert_asprintf("%s\tprevents %s from running %son %s", location->id, location->rsc->id, role_desc(location->role_filter), node_name); pcmk__output_create_html_node(out, "li", NULL, NULL, buf); free(node_name); free(buf); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pcmk__location_t *", "uint32_t") static int ban_text(pcmk__output_t *out, va_list args) { pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *); pcmk__location_t *location = va_arg(args, pcmk__location_t *); uint32_t show_opts = va_arg(args, uint32_t); char *node_name = pe__node_display_name(pe_node, pcmk__is_set(show_opts, pcmk_show_node_id)); out->list_item(out, NULL, "%s\tprevents %s from running %son %s", location->id, location->rsc->id, role_desc(location->role_filter), node_name); free(node_name); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pcmk__location_t *", "uint32_t") static int ban_xml(pcmk__output_t *out, va_list args) { pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *); pcmk__location_t *location = va_arg(args, pcmk__location_t *); uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t); const char *promoted_only = pcmk__btoa(location->role_filter == pcmk_role_promoted); char *weight_s = pcmk__itoa(pe_node->assign->score); pcmk__output_create_xml_node(out, PCMK_XE_BAN, PCMK_XA_ID, location->id, PCMK_XA_RESOURCE, location->rsc->id, PCMK_XA_NODE, pe_node->priv->name, PCMK_XA_WEIGHT, weight_s, PCMK_XA_PROMOTED_ONLY, promoted_only, /* This is a deprecated alias for * promoted_only. Removing it will break * backward compatibility of the API schema, * which will require an API schema major * version bump. 
*/ PCMK__XA_PROMOTED_ONLY_LEGACY, promoted_only, NULL); free(weight_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ban-list", "pcmk_scheduler_t *", "const char *", "GList *", "uint32_t", "bool") static int ban_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); const char *prefix = va_arg(args, const char *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); GList *gIter, *gIter2; int rc = pcmk_rc_no_output; /* Print each ban */ for (gIter = scheduler->priv->location_constraints; gIter != NULL; gIter = gIter->next) { pcmk__location_t *location = gIter->data; const pcmk_resource_t *rsc = location->rsc; if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) { continue; } if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) && !pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc, false)), only_rsc, pcmk__str_star_matches)) { continue; } for (gIter2 = location->nodes; gIter2 != NULL; gIter2 = gIter2->next) { pcmk_node_t *node = (pcmk_node_t *) gIter2->data; if (node->assign->score < 0) { PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints"); out->message(out, "ban", node, location, show_opts); } } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") static int cluster_counts_html(pcmk__output_t *out, va_list args) { unsigned int nnodes = va_arg(args, unsigned int); int nresources = va_arg(args, int); int ndisabled = va_arg(args, int); int nblocked = va_arg(args, int); xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL); xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL); xmlNode *child = NULL; child = pcmk__html_create(nodes_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%d node%s configured", nnodes, pcmk__plural_s(nnodes)); if (ndisabled && nblocked) { child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%d resource instance%s configured (%d ", nresources, pcmk__plural_s(nresources), ndisabled); child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "DISABLED"); child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, ", %d ", nblocked); child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "BLOCKED"); child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " from further action due to failure)"); } else if (ndisabled && !nblocked) { child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%d resource instance%s configured (%d ", nresources, pcmk__plural_s(nresources), ndisabled); child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "DISABLED"); child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, ")"); } else if (!ndisabled && nblocked) { child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%d resource instance%s configured (%d ", nresources, pcmk__plural_s(nresources), nblocked); child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "BLOCKED"); child = pcmk__html_create(resources_node, 
PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " from further action due to failure)"); } else { child = pcmk__html_create(resources_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%d resource instance%s configured", nresources, pcmk__plural_s(nresources)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") static int cluster_counts_text(pcmk__output_t *out, va_list args) { unsigned int nnodes = va_arg(args, unsigned int); int nresources = va_arg(args, int); int ndisabled = va_arg(args, int); int nblocked = va_arg(args, int); out->list_item(out, NULL, "%d node%s configured", nnodes, pcmk__plural_s(nnodes)); if (ndisabled && nblocked) { out->list_item(out, NULL, "%d resource instance%s configured " "(%d DISABLED, %d BLOCKED from " "further action due to failure)", nresources, pcmk__plural_s(nresources), ndisabled, nblocked); } else if (ndisabled && !nblocked) { out->list_item(out, NULL, "%d resource instance%s configured " "(%d DISABLED)", nresources, pcmk__plural_s(nresources), ndisabled); } else if (!ndisabled && nblocked) { out->list_item(out, NULL, "%d resource instance%s configured " "(%d BLOCKED from further action " "due to failure)", nresources, pcmk__plural_s(nresources), nblocked); } else { out->list_item(out, NULL, "%d resource instance%s configured", nresources, pcmk__plural_s(nresources)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int") static int cluster_counts_xml(pcmk__output_t *out, va_list args) { unsigned int nnodes = va_arg(args, unsigned int); int nresources = va_arg(args, int); int ndisabled = va_arg(args, int); int nblocked = va_arg(args, int); xmlNodePtr nodes_node = NULL; xmlNodePtr resources_node = NULL; char *s = NULL; nodes_node = pcmk__output_create_xml_node(out, PCMK_XE_NODES_CONFIGURED, NULL); resources_node = pcmk__output_create_xml_node(out, PCMK_XE_RESOURCES_CONFIGURED, NULL); s = pcmk__itoa(nnodes); pcmk__xe_set(nodes_node, PCMK_XA_NUMBER, s); free(s); s = pcmk__itoa(nresources); pcmk__xe_set(resources_node, PCMK_XA_NUMBER, s); free(s); s = pcmk__itoa(ndisabled); pcmk__xe_set(resources_node, PCMK_XA_DISABLED, s); free(s); s = pcmk__itoa(nblocked); pcmk__xe_set(resources_node, PCMK_XA_BLOCKED, s); free(s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *", "char *", "int") static int cluster_dc_html(pcmk__output_t *out, va_list args) { pcmk_node_t *dc = va_arg(args, pcmk_node_t *); const char *quorum = va_arg(args, const char *); const char *dc_version_s = va_arg(args, const char *); char *dc_name = va_arg(args, char *); bool mixed_version = va_arg(args, int); xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); xmlNode *child = NULL; child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "Current DC: "); if (dc) { child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%s (version %s) -", dc_name, pcmk__s(dc_version_s, "unknown")); if (mixed_version) { child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, PCMK__VALUE_WARNING); pcmk__xe_set_content(child, " MIXED-VERSION"); } child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " partition"); if (pcmk__is_true(quorum)) { child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " with"); } else { child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, PCMK__VALUE_WARNING); 
pcmk__xe_set_content(child, " WITHOUT"); } child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " quorum"); } else { child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, PCMK__VALUE_WARNING); pcmk__xe_set_content(child, "NONE"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *", "char *", "int") static int cluster_dc_text(pcmk__output_t *out, va_list args) { pcmk_node_t *dc = va_arg(args, pcmk_node_t *); const char *quorum = va_arg(args, const char *); const char *dc_version_s = va_arg(args, const char *); char *dc_name = va_arg(args, char *); bool mixed_version = va_arg(args, int); if (dc) { out->list_item(out, "Current DC", "%s (version %s) - %spartition %s quorum", dc_name, dc_version_s ? dc_version_s : "unknown", mixed_version ? "MIXED-VERSION " : "", pcmk__is_true(quorum) ? "with" : "WITHOUT"); } else { out->list_item(out, "Current DC", "NONE"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *", "char *", "int") static int cluster_dc_xml(pcmk__output_t *out, va_list args) { pcmk_node_t *dc = va_arg(args, pcmk_node_t *); const char *quorum = va_arg(args, const char *); const char *dc_version_s = va_arg(args, const char *); char *dc_name G_GNUC_UNUSED = va_arg(args, char *); bool mixed_version = va_arg(args, int); if (dc) { const char *with_quorum = pcmk__btoa(pcmk__is_true(quorum)); const char *mixed_version_s = pcmk__btoa(mixed_version); pcmk__output_create_xml_node(out, PCMK_XE_CURRENT_DC, PCMK_XA_PRESENT, PCMK_VALUE_TRUE, PCMK_XA_VERSION, pcmk__s(dc_version_s, ""), PCMK_XA_NAME, dc->priv->name, PCMK_XA_ID, dc->priv->id, PCMK_XA_WITH_QUORUM, with_quorum, PCMK_XA_MIXED_VERSION, mixed_version_s, NULL); } else { pcmk__output_create_xml_node(out, PCMK_XE_CURRENT_DC, PCMK_XA_PRESENT, PCMK_VALUE_FALSE, NULL); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("maint-mode", "uint64_t") static int cluster_maint_mode_text(pcmk__output_t *out, va_list args) { uint64_t flags = va_arg(args, uint64_t); if (pcmk__is_set(flags, pcmk__sched_in_maintenance)) { pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n"); pcmk__formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n"); return pcmk_rc_ok; } else if (pcmk__is_set(flags, pcmk__sched_stop_all)) { pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n"); pcmk__formatted_printf(out, " The cluster will keep all resources stopped\n"); return pcmk_rc_ok; } else { return pcmk_rc_no_output; } } PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_html(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); if (pcmk__is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { out->list_item(out, NULL, "STONITH of failed nodes enabled"); } else { out->list_item(out, NULL, "STONITH of failed nodes disabled"); } if (pcmk__is_set(scheduler->flags, pcmk__sched_symmetric_cluster)) { out->list_item(out, NULL, "Cluster is symmetric"); } else { out->list_item(out, NULL, "Cluster is asymmetric"); } switch (scheduler->no_quorum_policy) { /* @COMPAT These should say something like "resources that require * quorum" since resources with requires="nothing" are unaffected, but * it would be a good idea to investigate whether any major projects * search for this text first */ case pcmk_no_quorum_freeze: out->list_item(out, NULL, "No quorum policy: Freeze resources"); break; case 
pcmk_no_quorum_stop: out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); break; case pcmk_no_quorum_demote: out->list_item(out, NULL, "No quorum policy: Demote promotable " "resources and stop all other resources"); break; case pcmk_no_quorum_ignore: out->list_item(out, NULL, "No quorum policy: Ignore"); break; case pcmk_no_quorum_fence: out->list_item(out, NULL, "No quorum policy: Fence nodes in partition"); break; } if (pcmk__is_set(scheduler->flags, pcmk__sched_in_maintenance)) { xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); xmlNode *child = NULL; child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "Resource management: "); child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "DISABLED"); child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " (the cluster will not attempt to start, stop," " or recover services)"); } else if (pcmk__is_set(scheduler->flags, pcmk__sched_stop_all)) { xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); xmlNode *child = NULL; child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "Resource management: "); child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "STOPPED"); child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " (the cluster will keep all resources stopped)"); } else { out->list_item(out, NULL, "Resource management: enabled"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_log(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); if (pcmk__is_set(scheduler->flags, pcmk__sched_in_maintenance)) { return out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services."); } else if (pcmk__is_set(scheduler->flags, pcmk__sched_stop_all)) { return out->info(out, "Resource management is DISABLED. The cluster has stopped all resources."); } else { return pcmk_rc_no_output; } } PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_text(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); if (pcmk__is_set(scheduler->flags, pcmk__sched_fencing_enabled)) { out->list_item(out, NULL, "STONITH of failed nodes enabled"); } else { out->list_item(out, NULL, "STONITH of failed nodes disabled"); } if (pcmk__is_set(scheduler->flags, pcmk__sched_symmetric_cluster)) { out->list_item(out, NULL, "Cluster is symmetric"); } else { out->list_item(out, NULL, "Cluster is asymmetric"); } switch (scheduler->no_quorum_policy) { case pcmk_no_quorum_freeze: out->list_item(out, NULL, "No quorum policy: Freeze resources"); break; case pcmk_no_quorum_stop: out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); break; case pcmk_no_quorum_demote: out->list_item(out, NULL, "No quorum policy: Demote promotable " "resources and stop all other resources"); break; case pcmk_no_quorum_ignore: out->list_item(out, NULL, "No quorum policy: Ignore"); break; case pcmk_no_quorum_fence: out->list_item(out, NULL, "No quorum policy: Fence nodes in partition"); break; } return pcmk_rc_ok; } /*! 
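 * Illustrative usage of no_quorum_policy_text() (a sketch; the mapping
 * simply mirrors the switch in the function below):
 *
 *     no_quorum_policy_text(pcmk_no_quorum_freeze)  // -> PCMK_VALUE_FREEZE
 *     no_quorum_policy_text(pcmk_no_quorum_stop)    // -> PCMK_VALUE_STOP
 *     no_quorum_policy_text(pcmk_no_quorum_demote)  // -> PCMK_VALUE_DEMOTE
 *
 * Any unrecognized value falls through to PCMK_VALUE_UNKNOWN.
 *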
* \internal * \brief Get readable string representation of a no-quorum policy * * \param[in] policy No-quorum policy * * \return String representation of \p policy */ static const char * no_quorum_policy_text(enum pe_quorum_policy policy) { switch (policy) { case pcmk_no_quorum_freeze: return PCMK_VALUE_FREEZE; case pcmk_no_quorum_stop: return PCMK_VALUE_STOP; case pcmk_no_quorum_demote: return PCMK_VALUE_DEMOTE; case pcmk_no_quorum_ignore: return PCMK_VALUE_IGNORE; case pcmk_no_quorum_fence: return PCMK_VALUE_FENCE; default: return PCMK_VALUE_UNKNOWN; } } PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *") static int cluster_options_xml(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); const char *stonith_enabled = pcmk__flag_text(scheduler->flags, pcmk__sched_fencing_enabled); const char *symmetric_cluster = pcmk__flag_text(scheduler->flags, pcmk__sched_symmetric_cluster); const char *no_quorum_policy = no_quorum_policy_text(scheduler->no_quorum_policy); const char *maintenance_mode = pcmk__flag_text(scheduler->flags, pcmk__sched_in_maintenance); const char *stop_all_resources = pcmk__flag_text(scheduler->flags, pcmk__sched_stop_all); char *stonith_timeout_ms_s = pcmk__assert_asprintf("%u", scheduler->priv->fence_timeout_ms); char *priority_fencing_delay_ms_s = pcmk__assert_asprintf("%u", scheduler->priv->priority_fencing_ms); pcmk__output_create_xml_node(out, PCMK_XE_CLUSTER_OPTIONS, PCMK_XA_STONITH_ENABLED, stonith_enabled, PCMK_XA_SYMMETRIC_CLUSTER, symmetric_cluster, PCMK_XA_NO_QUORUM_POLICY, no_quorum_policy, PCMK_XA_MAINTENANCE_MODE, maintenance_mode, PCMK_XA_STOP_ALL_RESOURCES, stop_all_resources, PCMK_XA_STONITH_TIMEOUT_MS, stonith_timeout_ms_s, PCMK_XA_PRIORITY_FENCING_DELAY_MS, priority_fencing_delay_ms_s, NULL); free(stonith_timeout_ms_s); free(priority_fencing_delay_ms_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state") static int cluster_stack_html(pcmk__output_t *out, va_list args) { const char *stack_s = va_arg(args, const char *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL); xmlNode *child = NULL; child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "Stack: "); child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%s", stack_s); if (pcmkd_state != pcmk_pacemakerd_state_invalid) { child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " ("); child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%s", pcmk__pcmkd_state_enum2friendly(pcmkd_state)); child = pcmk__html_create(node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, ")"); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state") static int cluster_stack_text(pcmk__output_t *out, va_list args) { const char *stack_s = va_arg(args, const char *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); if (pcmkd_state != pcmk_pacemakerd_state_invalid) { out->list_item(out, "Stack", "%s (%s)", stack_s, pcmk__pcmkd_state_enum2friendly(pcmkd_state)); } else { out->list_item(out, "Stack", "%s", stack_s); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state") static int cluster_stack_xml(pcmk__output_t *out, va_list args) { 
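    /* Emit a PCMK_XE_STACK element whose PCMK_XA_TYPE attribute is the stack
     * name and whose PCMK_XA_PACEMAKERD_STATE attribute is the pacemakerd
     * state (only when that state is known).  An illustrative rendering,
     * with hypothetical attribute values, would be:
     *
     *   <stack type="corosync" pacemakerd-state="running"/>
     */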
const char *stack_s = va_arg(args, const char *); enum pcmk_pacemakerd_state pcmkd_state = (enum pcmk_pacemakerd_state) va_arg(args, int); const char *state_s = NULL; if (pcmkd_state != pcmk_pacemakerd_state_invalid) { state_s = pcmk_pacemakerd_api_daemon_state_enum2text(pcmkd_state); } pcmk__output_create_xml_node(out, PCMK_XE_STACK, PCMK_XA_TYPE, stack_s, PCMK_XA_PACEMAKERD_STATE, state_s, NULL); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *", "const char *") static int cluster_times_html(pcmk__output_t *out, va_list args) { const char *our_nodename = va_arg(args, const char *); const char *last_written = va_arg(args, const char *); const char *user = va_arg(args, const char *); const char *client = va_arg(args, const char *); const char *origin = va_arg(args, const char *); xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL); xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL); xmlNode *child = NULL; char *time_s = NULL; child = pcmk__html_create(updated_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "Last updated: "); child = pcmk__html_create(updated_node, PCMK__XE_SPAN, NULL, NULL); time_s = pcmk__epoch2str(NULL, 0); pcmk__xe_set_content(child, "%s", time_s); free(time_s); if (our_nodename != NULL) { child = pcmk__html_create(updated_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, " on "); child = pcmk__html_create(updated_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%s", our_nodename); } child = pcmk__html_create(changed_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "Last change: "); child = pcmk__html_create(changed_node, PCMK__XE_SPAN, NULL, NULL); time_s = last_changed_string(last_written, user, client, origin); pcmk__xe_set_content(child, "%s", time_s); free(time_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *", "const char *") static int cluster_times_xml(pcmk__output_t *out, va_list args) { const char *our_nodename = va_arg(args, const char *); const char *last_written = va_arg(args, const char *); const char *user = va_arg(args, const char *); const char *client = va_arg(args, const char *); const char *origin = va_arg(args, const char *); char *time_s = pcmk__epoch2str(NULL, 0); pcmk__output_create_xml_node(out, PCMK_XE_LAST_UPDATE, PCMK_XA_TIME, time_s, PCMK_XA_ORIGIN, our_nodename, NULL); pcmk__output_create_xml_node(out, PCMK_XE_LAST_CHANGE, PCMK_XA_TIME, pcmk__s(last_written, ""), PCMK_XA_USER, pcmk__s(user, ""), PCMK_XA_CLIENT, pcmk__s(client, ""), PCMK_XA_ORIGIN, pcmk__s(origin, ""), NULL); free(time_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *", "const char *", "const char *", "const char *") static int cluster_times_text(pcmk__output_t *out, va_list args) { const char *our_nodename = va_arg(args, const char *); const char *last_written = va_arg(args, const char *); const char *user = va_arg(args, const char *); const char *client = va_arg(args, const char *); const char *origin = va_arg(args, const char *); char *time_s = pcmk__epoch2str(NULL, 0); out->list_item(out, "Last updated", "%s%s%s", time_s, (our_nodename != NULL)? " on " : "", pcmk__s(our_nodename, "")); free(time_s); time_s = last_changed_string(last_written, user, client, origin); out->list_item(out, "Last change", " %s", time_s); free(time_s); return pcmk_rc_ok; } /*! 
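 * Illustrative sketch of how failed_action_friendly() assembles its message
 * (optional pieces in brackets; all values come from the operation XML):
 *
 *     <rsc> [<interval>-interval ]<action> on <node> returned '<exit status>'
 *         [ (<exit reason>)][ at <last-rc-change>][ after <exec time>]
 *
 * or, when the action could not be run at all:
 *
 *     <rsc> ... on <node> could not be executed (<status>[: <exit reason>])
 *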
* \internal * \brief Display a failed action in less-technical natural language * * \param[in,out] out Output object to use for display * \param[in] xml_op XML containing failed action * \param[in] op_key Operation key of failed action * \param[in] node_name Where failed action occurred * \param[in] rc OCF exit code of failed action * \param[in] status Execution status of failed action * \param[in] exit_reason Exit reason given for failed action * \param[in] exec_time String containing execution time in milliseconds */ static void failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op, const char *op_key, const char *node_name, int rc, int status, const char *exit_reason, const char *exec_time) { char *rsc_id = NULL; char *task = NULL; guint interval_ms = 0; time_t last_change_epoch = 0; GString *str = NULL; if (pcmk__str_empty(op_key) || !parse_op_key(op_key, &rsc_id, &task, &interval_ms)) { pcmk__str_update(&rsc_id, "unknown resource"); pcmk__str_update(&task, "unknown action"); interval_ms = 0; } pcmk__assert((rsc_id != NULL) && (task != NULL)); str = g_string_sized_new(256); // Should be sufficient for most messages pcmk__g_strcat(str, rsc_id, " ", NULL); if (interval_ms != 0) { pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ", NULL); } pcmk__g_strcat(str, pcmk__readable_action(task, interval_ms), " on ", node_name, NULL); if (status == PCMK_EXEC_DONE) { pcmk__g_strcat(str, " returned '", crm_exit_str(rc), "'", NULL); if (!pcmk__str_empty(exit_reason)) { pcmk__g_strcat(str, " (", exit_reason, ")", NULL); } } else { pcmk__g_strcat(str, " could not be executed (", pcmk_exec_status_str(status), NULL); if (!pcmk__str_empty(exit_reason)) { pcmk__g_strcat(str, ": ", exit_reason, NULL); } g_string_append_c(str, ')'); } if (pcmk__xe_get_time(xml_op, PCMK_XA_LAST_RC_CHANGE, &last_change_epoch) == pcmk_rc_ok) { char *s = pcmk__epoch2str(&last_change_epoch, 0); pcmk__g_strcat(str, " at ", s, NULL); free(s); } if (!pcmk__str_empty(exec_time)) { int exec_time_ms = 0; if ((pcmk__scan_min_int(exec_time, &exec_time_ms, 0) == pcmk_rc_ok) && (exec_time_ms > 0)) { pcmk__g_strcat(str, " after ", pcmk__readable_interval(exec_time_ms), NULL); } } out->list_item(out, NULL, "%s", str->str); g_string_free(str, TRUE); free(rsc_id); free(task); } /*! 
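 * Illustrative sketch of the line failed_action_technical() produces,
 * mirroring the printf format in the function body (optional pieces in
 * brackets):
 *
 *     <op_key> on <node> '<exit status>' (<rc>): call=<call_id>,
 *         status='<execution status>'[, exitreason='...']
 *         [, last-rc-change='...'][, queued=<N>ms][, exec=<N>ms]
 *
 * Missing pieces fall back to "unknown operation", "unknown exit status",
 * or "unknown", as below.
 *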
* \internal * \brief Display a failed action with technical details * * \param[in,out] out Output object to use for display * \param[in] xml_op XML containing failed action * \param[in] op_key Operation key of failed action * \param[in] node_name Where failed action occurred * \param[in] rc OCF exit code of failed action * \param[in] status Execution status of failed action * \param[in] exit_reason Exit reason given for failed action * \param[in] exec_time String containing execution time in milliseconds */ static void failed_action_technical(pcmk__output_t *out, const xmlNode *xml_op, const char *op_key, const char *node_name, int rc, int status, const char *exit_reason, const char *exec_time) { const char *call_id = pcmk__xe_get(xml_op, PCMK__XA_CALL_ID); const char *queue_time = pcmk__xe_get(xml_op, PCMK_XA_QUEUE_TIME); const char *exit_status = crm_exit_str(rc); const char *lrm_status = pcmk_exec_status_str(status); time_t last_change_epoch = 0; GString *str = NULL; if (pcmk__str_empty(op_key)) { op_key = "unknown operation"; } if (pcmk__str_empty(exit_status)) { exit_status = "unknown exit status"; } if (pcmk__str_empty(call_id)) { call_id = "unknown"; } str = g_string_sized_new(256); g_string_append_printf(str, "%s on %s '%s' (%d): call=%s, status='%s'", op_key, node_name, exit_status, rc, call_id, lrm_status); if (!pcmk__str_empty(exit_reason)) { pcmk__g_strcat(str, ", exitreason='", exit_reason, "'", NULL); } if (pcmk__xe_get_time(xml_op, PCMK_XA_LAST_RC_CHANGE, &last_change_epoch) == pcmk_rc_ok) { char *last_change_str = pcmk__epoch2str(&last_change_epoch, 0); pcmk__g_strcat(str, ", " PCMK_XA_LAST_RC_CHANGE "=" "'", last_change_str, "'", NULL); free(last_change_str); } if (!pcmk__str_empty(queue_time)) { pcmk__g_strcat(str, ", queued=", queue_time, "ms", NULL); } if (!pcmk__str_empty(exec_time)) { pcmk__g_strcat(str, ", exec=", exec_time, "ms", NULL); } out->list_item(out, NULL, "%s", str->str); g_string_free(str, TRUE); } PCMK__OUTPUT_ARGS("failed-action", "xmlNode *", "uint32_t") static int failed_action_default(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); uint32_t show_opts = va_arg(args, uint32_t); const char *op_key = pcmk__xe_history_key(xml_op); const char *node_name = pcmk__xe_get(xml_op, PCMK_XA_UNAME); const char *exit_reason = pcmk__xe_get(xml_op, PCMK_XA_EXIT_REASON); const char *exec_time = pcmk__xe_get(xml_op, PCMK_XA_EXEC_TIME); int rc; int status; pcmk__scan_min_int(pcmk__xe_get(xml_op, PCMK__XA_RC_CODE), &rc, 0); pcmk__scan_min_int(pcmk__xe_get(xml_op, PCMK__XA_OP_STATUS), &status, 0); if (pcmk__str_empty(node_name)) { node_name = "unknown node"; } if (pcmk__is_set(show_opts, pcmk_show_failed_detail)) { failed_action_technical(out, xml_op, op_key, node_name, rc, status, exit_reason, exec_time); } else { failed_action_friendly(out, xml_op, op_key, node_name, rc, status, exit_reason, exec_time); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("failed-action", "xmlNode *", "uint32_t") static int failed_action_xml(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t); const char *op_key = pcmk__xe_history_key(xml_op); const char *op_key_name = PCMK_XA_OP_KEY; int rc; int status; const char *uname = pcmk__xe_get(xml_op, PCMK_XA_UNAME); const char *call_id = pcmk__xe_get(xml_op, PCMK__XA_CALL_ID); const char *exitstatus = NULL; const char *exit_reason = pcmk__s(pcmk__xe_get(xml_op, PCMK_XA_EXIT_REASON), "none"); const char *status_s = NULL; time_t epoch 
= 0; gchar *exit_reason_esc = NULL; char *rc_s = NULL; xmlNodePtr node = NULL; if (pcmk__xml_needs_escape(exit_reason, pcmk__xml_escape_attr)) { exit_reason_esc = pcmk__xml_escape(exit_reason, pcmk__xml_escape_attr); exit_reason = exit_reason_esc; } pcmk__scan_min_int(pcmk__xe_get(xml_op, PCMK__XA_RC_CODE), &rc, 0); pcmk__scan_min_int(pcmk__xe_get(xml_op, PCMK__XA_OP_STATUS), &status, 0); if (pcmk__xe_get(xml_op, PCMK__XA_OPERATION_KEY) == NULL) { op_key_name = PCMK_XA_ID; } exitstatus = crm_exit_str(rc); rc_s = pcmk__itoa(rc); status_s = pcmk_exec_status_str(status); node = pcmk__output_create_xml_node(out, PCMK_XE_FAILURE, op_key_name, op_key, PCMK_XA_NODE, uname, PCMK_XA_EXITSTATUS, exitstatus, PCMK_XA_EXITREASON, exit_reason, PCMK_XA_EXITCODE, rc_s, PCMK_XA_CALL, call_id, PCMK_XA_STATUS, status_s, NULL); free(rc_s); pcmk__xe_get_time(xml_op, PCMK_XA_LAST_RC_CHANGE, &epoch); if (epoch > 0) { const char *queue_time = pcmk__xe_get(xml_op, PCMK_XA_QUEUE_TIME); const char *exec = pcmk__xe_get(xml_op, PCMK_XA_EXEC_TIME); const char *task = pcmk__xe_get(xml_op, PCMK_XA_OPERATION); guint interval_ms = 0; char *interval_ms_s = NULL; char *rc_change = pcmk__epoch2str(&epoch, crm_time_log_date |crm_time_log_timeofday |crm_time_log_with_timezone); pcmk__xe_get_guint(xml_op, PCMK_META_INTERVAL, &interval_ms); interval_ms_s = pcmk__assert_asprintf("%u", interval_ms); pcmk__xe_set_props(node, PCMK_XA_LAST_RC_CHANGE, rc_change, PCMK_XA_QUEUED, queue_time, PCMK_XA_EXEC, exec, PCMK_XA_INTERVAL, interval_ms_s, PCMK_XA_TASK, task, NULL); free(interval_ms_s); free(rc_change); } g_free(exit_reason_esc); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("failed-action-list", "pcmk_scheduler_t *", "GList *", "GList *", "uint32_t", "bool") static int failed_action_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); xmlNode *xml_op = NULL; int rc = pcmk_rc_no_output; if (xmlChildElementCount(scheduler->priv->failed) == 0) { return rc; } for (xml_op = pcmk__xe_first_child(scheduler->priv->failed, NULL, NULL, NULL); xml_op != NULL; xml_op = pcmk__xe_next(xml_op, NULL)) { char *rsc = NULL; if (!pcmk__str_in_list(pcmk__xe_get(xml_op, PCMK_XA_UNAME), only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } if (pcmk_xe_mask_probe_failure(xml_op)) { continue; } if (!parse_op_key(pcmk__xe_history_key(xml_op), &rsc, NULL, NULL)) { continue; } if (!pcmk__str_in_list(rsc, only_rsc, pcmk__str_star_matches)) { free(rsc); continue; } free(rsc); PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions"); out->message(out, "failed-action", xml_op, show_opts); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } static void status_node(pcmk_node_t *node, xmlNodePtr parent, uint32_t show_opts) { int health = pe__node_health(node); xmlNode *child = NULL; // Cluster membership if (node->details->online) { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, PCMK_VALUE_ONLINE); pcmk__xe_set_content(child, " online"); } else { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, PCMK_VALUE_OFFLINE); pcmk__xe_set_content(child, " OFFLINE"); } // Standby mode if (pcmk__is_set(node->priv->flags, pcmk__node_fail_standby)) { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, PCMK_VALUE_STANDBY); if (node->details->running_rsc == NULL) { pcmk__xe_set_content(child, " (in standby due to " 
PCMK_META_ON_FAIL ")"); } else { pcmk__xe_set_content(child, " (in standby due to " PCMK_META_ON_FAIL "," " with active resources)"); } } else if (pcmk__is_set(node->priv->flags, pcmk__node_standby)) { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, PCMK_VALUE_STANDBY); if (node->details->running_rsc == NULL) { pcmk__xe_set_content(child, " (in standby)"); } else { pcmk__xe_set_content(child, " (in standby, with active resources)"); } } // Maintenance mode if (node->details->maintenance) { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, PCMK__VALUE_MAINT); pcmk__xe_set_content(child, " (in maintenance mode)"); } // Node health if (health < 0) { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, PCMK__VALUE_HEALTH_RED); pcmk__xe_set_content(child, " (health is RED)"); } else if (health == 0) { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, PCMK__VALUE_HEALTH_YELLOW); pcmk__xe_set_content(child, " (health is YELLOW)"); } // Feature set if (pcmk__is_set(show_opts, pcmk_show_feature_set)) { const char *feature_set = get_node_feature_set(node); if (feature_set != NULL) { child = pcmk__html_create(parent, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, ", feature set %s", feature_set); } } } PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *", "GList *") static int node_html(pcmk__output_t *out, va_list args) { pcmk_node_t *node = va_arg(args, pcmk_node_t *); uint32_t show_opts = va_arg(args, uint32_t); bool full = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); char *node_name = pe__node_display_name(node, pcmk__is_set(show_opts, pcmk_show_node_id)); if (full) { xmlNode *item_node = NULL; xmlNode *child = NULL; if (pcmk__all_flags_set(show_opts, pcmk_show_brief|pcmk_show_rscs_by_node)) { GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); out->begin_list(out, NULL, NULL, "%s:", node_name); item_node = pcmk__output_xml_create_parent(out, "li", NULL); child = pcmk__html_create(item_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "Status:"); status_node(node, item_node, show_opts); if (rscs != NULL) { uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs; out->begin_list(out, NULL, NULL, "Resources"); pe__rscs_brief_output(out, rscs, new_show_opts); out->end_list(out); } pcmk__output_xml_pop_parent(out); out->end_list(out); } else if (pcmk__is_set(show_opts, pcmk_show_rscs_by_node)) { GList *lpc2 = NULL; int rc = pcmk_rc_no_output; out->begin_list(out, NULL, NULL, "%s:", node_name); item_node = pcmk__output_xml_create_parent(out, "li", NULL); child = pcmk__html_create(item_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "Status:"); status_node(node, item_node, show_opts); for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc2->data; PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources"); show_opts |= pcmk_show_rsc_only; out->message(out, (const char *) rsc->priv->xml->name, show_opts, rsc, only_node, only_rsc); } PCMK__OUTPUT_LIST_FOOTER(out, rc); pcmk__output_xml_pop_parent(out); out->end_list(out); } else { item_node = pcmk__output_create_xml_node(out, "li", NULL); child = pcmk__html_create(item_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "%s:", node_name); status_node(node, item_node, show_opts); } } else { out->begin_list(out, NULL, NULL, "%s:", node_name); } free(node_name); return pcmk_rc_ok; } /*! 
* \internal * \brief Get a human-friendly textual description of a node's status * * \param[in] node Node to check * * \return String representation of node's status */ static const char * node_text_status(const pcmk_node_t *node) { if (node->details->unclean) { if (node->details->online) { return "UNCLEAN (online)"; } else if (node->details->pending) { return "UNCLEAN (pending)"; } else { return "UNCLEAN (offline)"; } } else if (node->details->pending) { return "pending"; } else if (pcmk__is_set(node->priv->flags, pcmk__node_fail_standby) && node->details->online) { return "standby (" PCMK_META_ON_FAIL ")"; } else if (pcmk__is_set(node->priv->flags, pcmk__node_standby)) { if (!node->details->online) { return "OFFLINE (standby)"; } else if (node->details->running_rsc == NULL) { return "standby"; } else { return "standby (with active resources)"; } } else if (node->details->maintenance) { if (node->details->online) { return "maintenance"; } else { return "OFFLINE (maintenance)"; } } else if (node->details->online) { return "online"; } return "OFFLINE"; } PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *", "GList *") static int node_text(pcmk__output_t *out, va_list args) { pcmk_node_t *node = va_arg(args, pcmk_node_t *); uint32_t show_opts = va_arg(args, uint32_t); bool full = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); if (full) { char *node_name = pe__node_display_name(node, pcmk__is_set(show_opts, pcmk_show_node_id)); GString *str = g_string_sized_new(64); int health = pe__node_health(node); // Create a summary line with node type, name, and status if (pcmk__is_guest_or_bundle_node(node)) { g_string_append(str, "GuestNode"); } else if (pcmk__is_remote_node(node)) { g_string_append(str, "RemoteNode"); } else { g_string_append(str, "Node"); } pcmk__g_strcat(str, " ", node_name, ": ", node_text_status(node), NULL); if (health < 0) { g_string_append(str, " (health is RED)"); } else if (health == 0) { g_string_append(str, " (health is YELLOW)"); } if (pcmk__is_set(show_opts, pcmk_show_feature_set)) { const char *feature_set = get_node_feature_set(node); if (feature_set != NULL) { pcmk__g_strcat(str, ", feature set ", feature_set, NULL); } } /* If we're grouping by node, print its resources */ if (pcmk__is_set(show_opts, pcmk_show_rscs_by_node)) { if (pcmk__is_set(show_opts, pcmk_show_brief)) { GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc); if (rscs != NULL) { uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs; out->begin_list(out, NULL, NULL, "%s", str->str); out->begin_list(out, NULL, NULL, "Resources"); pe__rscs_brief_output(out, rscs, new_show_opts); out->end_list(out); out->end_list(out); g_list_free(rscs); } } else { GList *gIter2 = NULL; out->begin_list(out, NULL, NULL, "%s", str->str); out->begin_list(out, NULL, NULL, "Resources"); for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) gIter2->data; show_opts |= pcmk_show_rsc_only; out->message(out, (const char *) rsc->priv->xml->name, show_opts, rsc, only_node, only_rsc); } out->end_list(out); out->end_list(out); } } else { out->list_item(out, NULL, "%s", str->str); } g_string_free(str, TRUE); free(node_name); } else { char *node_name = pe__node_display_name(node, pcmk__is_set(show_opts, pcmk_show_node_id)); out->begin_list(out, NULL, NULL, "Node: %s", node_name); free(node_name); } return pcmk_rc_ok; } /*! 
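 * For example, health_text(-1) returns PCMK_VALUE_RED, health_text(0)
 * returns PCMK_VALUE_YELLOW, and health_text(1) returns PCMK_VALUE_GREEN,
 * matching the \retval list below.
 *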
* \internal * \brief Convert an integer health value to a string representation * * \param[in] health Integer health value * * \retval \c PCMK_VALUE_RED if \p health is less than 0 * \retval \c PCMK_VALUE_YELLOW if \p health is equal to 0 * \retval \c PCMK_VALUE_GREEN if \p health is greater than 0 */ static const char * health_text(int health) { if (health < 0) { return PCMK_VALUE_RED; } else if (health == 0) { return PCMK_VALUE_YELLOW; } else { return PCMK_VALUE_GREEN; } } /*! * \internal * \brief Convert a node variant to a string representation * * \param[in] variant Node variant * * \retval \c PCMK_VALUE_MEMBER if \p node_type is \c pcmk__node_variant_cluster * \retval \c PCMK_VALUE_REMOTE if \p node_type is \c pcmk__node_variant_remote * \retval \c PCMK_VALUE_UNKNOWN otherwise */ static const char * node_variant_text(enum pcmk__node_variant variant) { switch (variant) { case pcmk__node_variant_cluster: return PCMK_VALUE_MEMBER; case pcmk__node_variant_remote: return PCMK_VALUE_REMOTE; default: return PCMK_VALUE_UNKNOWN; } } PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *", "GList *") static int node_xml(pcmk__output_t *out, va_list args) { pcmk_node_t *node = va_arg(args, pcmk_node_t *); uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t); bool full = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); if (full) { const char *online = pcmk__btoa(node->details->online); const char *standby = pcmk__flag_text(node->priv->flags, pcmk__node_standby); const char *standby_onfail = pcmk__flag_text(node->priv->flags, pcmk__node_fail_standby); const char *maintenance = pcmk__btoa(node->details->maintenance); const char *pending = pcmk__btoa(node->details->pending); const char *unclean = pcmk__btoa(node->details->unclean); const char *health = health_text(pe__node_health(node)); const char *feature_set = get_node_feature_set(node); const char *shutdown = pcmk__btoa(node->details->shutdown); const char *expected_up = pcmk__flag_text(node->priv->flags, pcmk__node_expected_up); const bool is_dc = pcmk__same_node(node, node->priv->scheduler->dc_node); int length = g_list_length(node->details->running_rsc); char *resources_running = pcmk__itoa(length); const char *node_type = node_variant_text(node->priv->variant); int rc = pcmk_rc_ok; rc = pe__name_and_nvpairs_xml(out, true, PCMK_XE_NODE, PCMK_XA_NAME, node->priv->name, PCMK_XA_ID, node->priv->id, PCMK_XA_ONLINE, online, PCMK_XA_STANDBY, standby, PCMK_XA_STANDBY_ONFAIL, standby_onfail, PCMK_XA_MAINTENANCE, maintenance, PCMK_XA_PENDING, pending, PCMK_XA_UNCLEAN, unclean, PCMK_XA_HEALTH, health, PCMK_XA_FEATURE_SET, feature_set, PCMK_XA_SHUTDOWN, shutdown, PCMK_XA_EXPECTED_UP, expected_up, PCMK_XA_IS_DC, pcmk__btoa(is_dc), PCMK_XA_RESOURCES_RUNNING, resources_running, PCMK_XA_TYPE, node_type, NULL); free(resources_running); pcmk__assert(rc == pcmk_rc_ok); if (pcmk__is_guest_or_bundle_node(node)) { xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out); pcmk__xe_set(xml_node, PCMK_XA_ID_AS_RESOURCE, node->priv->remote->priv->launcher->id); } if (pcmk__is_set(show_opts, pcmk_show_rscs_by_node)) { GList *lpc = NULL; for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; show_opts |= pcmk_show_rsc_only; out->message(out, (const char *) rsc->priv->xml->name, show_opts, rsc, only_node, only_rsc); } } out->end_list(out); } else { pcmk__output_xml_create_parent(out, PCMK_XE_NODE, PCMK_XA_NAME, 
node->priv->name, NULL); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int") static int node_attribute_text(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); bool add_extra = va_arg(args, int); int expected_score = va_arg(args, int); if (add_extra) { int v; if (value == NULL) { v = 0; } else { pcmk__scan_min_int(value, &v, INT_MIN); } if (v <= 0) { out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value); } else if (v < expected_score) { out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score); } else { out->list_item(out, NULL, "%-32s\t: %-10s", name, value); } } else { out->list_item(out, NULL, "%-32s\t: %-10s", name, value); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int") static int node_attribute_html(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); bool add_extra = va_arg(args, int); int expected_score = va_arg(args, int); if (add_extra) { int v = 0; xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL); xmlNode *child = NULL; if (value != NULL) { pcmk__scan_min_int(value, &v, INT_MIN); } child = pcmk__html_create(item_node, PCMK__XE_SPAN, NULL, NULL); pcmk__xe_set_content(child, "%s: %s", name, value); if (v <= 0) { child = pcmk__html_create(item_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "(connectivity is lost)"); } else if (v < expected_score) { child = pcmk__html_create(item_node, PCMK__XE_SPAN, NULL, PCMK__VALUE_BOLD); pcmk__xe_set_content(child, "(connectivity is degraded -- expected %d)", expected_score); } } else { out->list_item(out, NULL, "%s: %s", name, value); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNode *") static int node_and_op(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); xmlNodePtr xml_op = va_arg(args, xmlNodePtr); pcmk_resource_t *rsc = NULL; gchar *node_str = NULL; char *last_change_str = NULL; const char *op_rsc = pcmk__xe_get(xml_op, PCMK_XA_RESOURCE); int status; time_t last_change = 0; pcmk__scan_min_int(pcmk__xe_get(xml_op, PCMK__XA_OP_STATUS), &status, PCMK_EXEC_UNKNOWN); rsc = pe_find_resource(scheduler->priv->resources, op_rsc); if (rsc) { const pcmk_node_t *node = pcmk__current_node(rsc); const char *target_role = g_hash_table_lookup(rsc->priv->meta, PCMK_META_TARGET_ROLE); uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending; if (node == NULL) { node = rsc->priv->pending_node; } node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node, show_opts, target_role, false); } else { node_str = pcmk__assert_asprintf("Unknown resource %s", op_rsc); } if (pcmk__xe_get_time(xml_op, PCMK_XA_LAST_RC_CHANGE, &last_change) == pcmk_rc_ok) { const char *exec_time = pcmk__xe_get(xml_op, PCMK_XA_EXEC_TIME); last_change_str = pcmk__assert_asprintf(", %s='%s', exec=%sms", PCMK_XA_LAST_RC_CHANGE, g_strchomp(ctime(&last_change)), exec_time); } out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s", node_str, pcmk__xe_history_key(xml_op), pcmk__xe_get(xml_op, PCMK_XA_UNAME), pcmk__xe_get(xml_op, PCMK__XA_CALL_ID), pcmk__xe_get(xml_op, PCMK__XA_RC_CODE), last_change_str ? 
last_change_str : "", pcmk_exec_status_str(status)); g_free(node_str); free(last_change_str); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNode *") static int node_and_op_xml(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); xmlNodePtr xml_op = va_arg(args, xmlNodePtr); pcmk_resource_t *rsc = NULL; const char *uname = pcmk__xe_get(xml_op, PCMK_XA_UNAME); const char *call_id = pcmk__xe_get(xml_op, PCMK__XA_CALL_ID); const char *rc_s = pcmk__xe_get(xml_op, PCMK__XA_RC_CODE); const char *status_s = NULL; const char *op_rsc = pcmk__xe_get(xml_op, PCMK_XA_RESOURCE); int status; time_t last_change = 0; xmlNode *node = NULL; pcmk__scan_min_int(pcmk__xe_get(xml_op, PCMK__XA_OP_STATUS), &status, PCMK_EXEC_UNKNOWN); status_s = pcmk_exec_status_str(status); node = pcmk__output_create_xml_node(out, PCMK_XE_OPERATION, PCMK_XA_OP, pcmk__xe_history_key(xml_op), PCMK_XA_NODE, uname, PCMK_XA_CALL, call_id, PCMK_XA_RC, rc_s, PCMK_XA_STATUS, status_s, NULL); rsc = pe_find_resource(scheduler->priv->resources, op_rsc); if (rsc) { const char *class = pcmk__xe_get(rsc->priv->xml, PCMK_XA_CLASS); const char *provider = pcmk__xe_get(rsc->priv->xml, PCMK_XA_PROVIDER); const char *kind = pcmk__xe_get(rsc->priv->xml, PCMK_XA_TYPE); bool has_provider = pcmk__is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider); char *agent_tuple = pcmk__assert_asprintf("%s:%s:%s", class, (has_provider? provider : ""), kind); pcmk__xe_set_props(node, PCMK_XA_RSC, rsc_printable_id(rsc), PCMK_XA_AGENT, agent_tuple, NULL); free(agent_tuple); } if (pcmk__xe_get_time(xml_op, PCMK_XA_LAST_RC_CHANGE, &last_change) == pcmk_rc_ok) { const char *last_rc_change = g_strchomp(ctime(&last_change)); const char *exec_time = pcmk__xe_get(xml_op, PCMK_XA_EXEC_TIME); pcmk__xe_set_props(node, PCMK_XA_LAST_RC_CHANGE, last_rc_change, PCMK_XA_EXEC_TIME, exec_time, NULL); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int") static int node_attribute_xml(pcmk__output_t *out, va_list args) { const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); bool add_extra = va_arg(args, int); int expected_score = va_arg(args, int); xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_ATTRIBUTE, PCMK_XA_NAME, name, PCMK_XA_VALUE, value, NULL); if (add_extra) { char *buf = pcmk__itoa(expected_score); pcmk__xe_set(node, PCMK_XA_EXPECTED, buf); free(buf); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-attribute-list", "pcmk_scheduler_t *", "uint32_t", "bool", "GList *", "GList *") static int node_attribute_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); int rc = pcmk_rc_no_output; /* Display each node's attributes */ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = gIter->data; GList *attr_list = NULL; GHashTableIter iter; gpointer key; if (!node || !node->details || !node->details->online) { continue; } // @TODO Maybe skip filtering for XML output g_hash_table_iter_init(&iter, node->priv->attrs); while (g_hash_table_iter_next (&iter, &key, NULL)) { attr_list = filter_attr_list(attr_list, key); } if (attr_list == NULL) { continue; } if (!pcmk__str_in_list(node->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { 
g_list_free(attr_list); continue; } PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes"); out->message(out, "node", node, show_opts, false, only_node, only_rsc); for (GList *aIter = attr_list; aIter != NULL; aIter = aIter->next) { const char *name = aIter->data; const char *value = NULL; int expected_score = 0; bool add_extra = false; value = pcmk__node_attr(node, name, NULL, pcmk__rsc_node_current); add_extra = add_extra_info(node, node->details->running_rsc, scheduler, name, &expected_score); /* Print attribute name and value */ out->message(out, "node-attribute", name, value, add_extra, expected_score); } g_list_free(attr_list); out->end_list(out); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *") static int node_capacity(pcmk__output_t *out, va_list args) { const pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *comment = va_arg(args, const char *); char *dump_text = pcmk__assert_asprintf("%s: %s capacity:", comment, pcmk__node_name(node)); g_hash_table_foreach(node->priv->utilization, append_dump_text, &dump_text); out->list_item(out, NULL, "%s", dump_text); free(dump_text); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *") static int node_capacity_xml(pcmk__output_t *out, va_list args) { const pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *uname = node->priv->name; const char *comment = va_arg(args, const char *); xmlNodePtr xml_node = pcmk__output_create_xml_node(out, PCMK_XE_CAPACITY, PCMK_XA_NODE, uname, PCMK_XA_COMMENT, comment, NULL); g_hash_table_foreach(node->priv->utilization, add_dump_node, xml_node); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-history-list", "pcmk_scheduler_t *", "pcmk_node_t *", "xmlNode *", "GList *", "GList *", "uint32_t", "uint32_t") static int node_history_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); xmlNode *node_state = va_arg(args, xmlNode *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); xmlNode *lrm_rsc = NULL; xmlNode *rsc_entry = NULL; int rc = pcmk_rc_no_output; lrm_rsc = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL, NULL); lrm_rsc = pcmk__xe_first_child(lrm_rsc, PCMK__XE_LRM_RESOURCES, NULL, NULL); /* Print history of each of the node's resources */ for (rsc_entry = pcmk__xe_first_child(lrm_rsc, PCMK__XE_LRM_RESOURCE, NULL, NULL); rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry, PCMK__XE_LRM_RESOURCE)) { const char *rsc_id = pcmk__xe_get(rsc_entry, PCMK_XA_ID); pcmk_resource_t *rsc = NULL; const pcmk_resource_t *parent = NULL; if (rsc_id == NULL) { continue; // Malformed entry } rsc = pe_find_resource(scheduler->priv->resources, rsc_id); if (rsc == NULL) { continue; // Resource was removed from configuration } /* We can't use is_filtered here to filter group resources. For is_filtered, * we have to decide whether to check the parent or not. If we check the * parent, all elements of a group will always be printed because that's how * is_filtered works for groups. If we do not check the parent, sometimes * this will filter everything out. * * For other resource types, is_filtered is okay. 
*/ parent = pe__const_top_resource(rsc, false); if (pcmk__is_group(parent)) { if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) && !pcmk__str_in_list(rsc_printable_id(parent), only_rsc, pcmk__str_star_matches)) { continue; } } else if (rsc->priv->fns->is_filtered(rsc, only_rsc, true)) { continue; } if (!pcmk__is_set(section_opts, pcmk_section_operations)) { time_t last_failure = 0; int failcount = pe_get_failcount(node, rsc, &last_failure, pcmk__fc_default, NULL); if (failcount <= 0) { continue; } if (rc == pcmk_rc_no_output) { rc = pcmk_rc_ok; out->message(out, "node", node, show_opts, false, only_node, only_rsc); } out->message(out, "resource-history", rsc, rsc_id, false, failcount, last_failure, false); } else { GList *op_list = get_operation_list(rsc_entry); pcmk_resource_t *rsc = NULL; if (op_list == NULL) { continue; } rsc = pe_find_resource(scheduler->priv->resources, pcmk__xe_get(rsc_entry, PCMK_XA_ID)); if (rc == pcmk_rc_no_output) { rc = pcmk_rc_ok; out->message(out, "node", node, show_opts, false, only_node, only_rsc); } out->message(out, "resource-operation-list", scheduler, rsc, node, op_list, show_opts); } } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool") static int node_list_html(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer G_GNUC_UNUSED = va_arg(args, int); int rc = pcmk_rc_no_output; for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; if (!pcmk__str_in_list(node->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Node List"); out->message(out, "node", node, show_opts, true, only_node, only_rsc); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool") static int node_list_text(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); /* space-separated lists of node names */ GString *online_nodes = NULL; GString *online_remote_nodes = NULL; GString *online_guest_nodes = NULL; GString *offline_nodes = NULL; GString *offline_remote_nodes = NULL; int rc = pcmk_rc_no_output; for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; char *node_name = pe__node_display_name(node, pcmk__is_set(show_opts, pcmk_show_node_id)); if (!pcmk__str_in_list(node->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { free(node_name); continue; } PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node List"); // Determine whether to display node individually or in a list if (node->details->unclean || node->details->pending || (pcmk__is_set(node->priv->flags, pcmk__node_fail_standby) && node->details->online) || pcmk__is_set(node->priv->flags, pcmk__node_standby) || node->details->maintenance || pcmk__is_set(show_opts, pcmk_show_rscs_by_node) || pcmk__is_set(show_opts, pcmk_show_feature_set) || (pe__node_health(node) <= 0)) { // Display node individually } else if (node->details->online) { // Display online node in a list if 
(pcmk__is_guest_or_bundle_node(node)) { pcmk__add_word(&online_guest_nodes, 1024, node_name); } else if (pcmk__is_remote_node(node)) { pcmk__add_word(&online_remote_nodes, 1024, node_name); } else { pcmk__add_word(&online_nodes, 1024, node_name); } free(node_name); continue; } else { // Display offline node in a list if (pcmk__is_remote_node(node)) { pcmk__add_word(&offline_remote_nodes, 1024, node_name); } else if (pcmk__is_guest_or_bundle_node(node)) { /* ignore offline guest nodes */ } else { pcmk__add_word(&offline_nodes, 1024, node_name); } free(node_name); continue; } /* If we get here, node is in bad state, or we're grouping by node */ out->message(out, "node", node, show_opts, true, only_node, only_rsc); free(node_name); } /* If we're not grouping by node, summarize nodes by status */ if (online_nodes != NULL) { out->list_item(out, "Online", "[ %s ]", (const char *) online_nodes->str); g_string_free(online_nodes, TRUE); } if (offline_nodes != NULL) { out->list_item(out, "OFFLINE", "[ %s ]", (const char *) offline_nodes->str); g_string_free(offline_nodes, TRUE); } if (online_remote_nodes) { out->list_item(out, "RemoteOnline", "[ %s ]", (const char *) online_remote_nodes->str); g_string_free(online_remote_nodes, TRUE); } if (offline_remote_nodes) { out->list_item(out, "RemoteOFFLINE", "[ %s ]", (const char *) offline_remote_nodes->str); g_string_free(offline_remote_nodes, TRUE); } if (online_guest_nodes != NULL) { out->list_item(out, "GuestOnline", "[ %s ]", (const char *) online_guest_nodes->str); g_string_free(online_guest_nodes, TRUE); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool") static int node_list_xml(pcmk__output_t *out, va_list args) { GList *nodes = va_arg(args, GList *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer G_GNUC_UNUSED = va_arg(args, int); /* PCMK_XE_NODES acts as the list's element name for CLI tools that use * pcmk__output_enable_list_element. Otherwise PCMK_XE_NODES is the * value of the list's PCMK_XA_NAME attribute. 
*/ out->begin_list(out, NULL, NULL, PCMK_XE_NODES); for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) { pcmk_node_t *node = (pcmk_node_t *) gIter->data; if (!pcmk__str_in_list(node->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } out->message(out, "node", node, show_opts, true, only_node, only_rsc); } out->end_list(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-summary", "pcmk_scheduler_t *", "GList *", "GList *", "uint32_t", "uint32_t", "bool") static int node_summary(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *); GList *only_node = va_arg(args, GList *); GList *only_rsc = va_arg(args, GList *); uint32_t section_opts = va_arg(args, uint32_t); uint32_t show_opts = va_arg(args, uint32_t); bool print_spacer = va_arg(args, int); xmlNode *node_state = NULL; xmlNode *cib_status = pcmk_find_cib_element(scheduler->input, PCMK_XE_STATUS); int rc = pcmk_rc_no_output; if (xmlChildElementCount(cib_status) == 0) { return rc; } for (node_state = pcmk__xe_first_child(cib_status, PCMK__XE_NODE_STATE, NULL, NULL); node_state != NULL; node_state = pcmk__xe_next(node_state, PCMK__XE_NODE_STATE)) { pcmk_node_t *node = pe_find_node_id(scheduler->nodes, pcmk__xe_id(node_state)); const bool operations = pcmk__is_set(section_opts, pcmk_section_operations); if (!node || !node->details || !node->details->online) { continue; } if (!pcmk__str_in_list(node->priv->name, only_node, pcmk__str_star_matches|pcmk__str_casei)) { continue; } if (operations) { PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Operations"); } else { PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Migration Summary"); } out->message(out, "node-history-list", scheduler, node, node_state, only_node, only_rsc, section_opts, show_opts); } PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *", "const char *", "const char *") static int node_weight(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); const char *prefix = va_arg(args, const char *); const char *uname = va_arg(args, const char *); const char *score = va_arg(args, const char *); if (rsc) { out->list_item(out, NULL, "%s: %s allocation score on %s: %s", prefix, rsc->id, uname, score); } else { out->list_item(out, NULL, "%s: %s = %s", prefix, uname, score); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *", "const char *", "const char *") static int node_weight_xml(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); const char *prefix = va_arg(args, const char *); const char *uname = va_arg(args, const char *); const char *score = va_arg(args, const char *); xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_NODE_WEIGHT, PCMK_XA_FUNCTION, prefix, PCMK_XA_NODE, uname, PCMK_XA_SCORE, score, NULL); if (rsc) { pcmk__xe_set(node, PCMK_XA_ID, rsc->id); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("op-history", "xmlNode *", "const char *", "const char *", "int", "uint32_t") static int op_history_text(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); const char *task = va_arg(args, const char *); const char *interval_ms_s = va_arg(args, const char *); int rc = va_arg(args, int); uint32_t show_opts = va_arg(args, uint32_t); char *buf = op_history_string(xml_op, task, interval_ms_s, rc, pcmk__is_set(show_opts, pcmk_show_timing)); 
out->list_item(out, NULL, "%s", buf); free(buf); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("op-history", "xmlNode *", "const char *", "const char *", "int", "uint32_t") static int op_history_xml(pcmk__output_t *out, va_list args) { xmlNodePtr xml_op = va_arg(args, xmlNodePtr); const char *task = va_arg(args, const char *); const char *interval_ms_s = va_arg(args, const char *); int rc = va_arg(args, int); uint32_t show_opts = va_arg(args, uint32_t); const char *call_id = pcmk__xe_get(xml_op, PCMK__XA_CALL_ID); char *rc_s = pcmk__itoa(rc); const char *rc_text = crm_exit_str(rc); xmlNodePtr node = NULL; node = pcmk__output_create_xml_node(out, PCMK_XE_OPERATION_HISTORY, PCMK_XA_CALL, call_id, PCMK_XA_TASK, task, PCMK_XA_RC, rc_s, PCMK_XA_RC_TEXT, rc_text, NULL); free(rc_s); if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) { char *s = pcmk__assert_asprintf("%sms", interval_ms_s); pcmk__xe_set(node, PCMK_XA_INTERVAL, s); free(s); } if (pcmk__is_set(show_opts, pcmk_show_timing)) { const char *value = NULL; time_t epoch = 0; pcmk__xe_get_time(xml_op, PCMK_XA_LAST_RC_CHANGE, &epoch); if (epoch > 0) { char *s = pcmk__epoch2str(&epoch, 0); pcmk__xe_set(node, PCMK_XA_LAST_RC_CHANGE, s); free(s); } value = pcmk__xe_get(xml_op, PCMK_XA_EXEC_TIME); if (value) { char *s = pcmk__assert_asprintf("%sms", value); pcmk__xe_set(node, PCMK_XA_EXEC_TIME, s); free(s); } value = pcmk__xe_get(xml_op, PCMK_XA_QUEUE_TIME); if (value) { char *s = pcmk__assert_asprintf("%sms", value); pcmk__xe_set(node, PCMK_XA_QUEUE_TIME, s); free(s); } } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static int promotion_score(pcmk__output_t *out, va_list args) { pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *chosen = va_arg(args, pcmk_node_t *); const char *score = va_arg(args, const char *); if (chosen == NULL) { out->list_item(out, NULL, "%s promotion score (inactive): %s", child_rsc->id, score); } else { out->list_item(out, NULL, "%s promotion score on %s: %s", child_rsc->id, pcmk__node_name(chosen), score); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static int promotion_score_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *chosen = va_arg(args, pcmk_node_t *); const char *score = va_arg(args, const char *); xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_PROMOTION_SCORE, PCMK_XA_ID, child_rsc->id, PCMK_XA_SCORE, score, NULL); if (chosen) { pcmk__xe_set(node, PCMK_XA_NODE, chosen->priv->name); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool") static int resource_config(pcmk__output_t *out, va_list args) { const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *); GString *xml_buf = g_string_sized_new(1024); bool raw = va_arg(args, int); formatted_xml_buf(rsc, xml_buf, raw); out->output_xml(out, PCMK_XE_XML, xml_buf->str); g_string_free(xml_buf, TRUE); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool") static int resource_config_text(pcmk__output_t *out, va_list args) { pcmk__formatted_printf(out, "Resource XML:\n"); return resource_config(out, args); } PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *", "bool", "int", "time_t", "bool") static int resource_history_text(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, 
pcmk_resource_t *);
    const char *rsc_id = va_arg(args, const char *);
    bool all = va_arg(args, int);
    int failcount = va_arg(args, int);
    time_t last_failure = va_arg(args, time_t);
    bool as_header = va_arg(args, int);

    char *buf = resource_history_string(rsc, rsc_id, all, failcount,
                                        last_failure);

    if (as_header) {
        out->begin_list(out, NULL, NULL, "%s", buf);
    } else {
        out->list_item(out, NULL, "%s", buf);
    }
    free(buf);
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *",
                  "bool", "int", "time_t", "bool")
static int
resource_history_xml(pcmk__output_t *out, va_list args)
{
    pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
    const char *rsc_id = va_arg(args, const char *);
    bool all = va_arg(args, int);
    int failcount = va_arg(args, int);
    time_t last_failure = va_arg(args, time_t);
    bool as_header = va_arg(args, int);

    xmlNodePtr node = pcmk__output_xml_create_parent(out,
                                                     PCMK_XE_RESOURCE_HISTORY,
                                                     PCMK_XA_ID, rsc_id,
                                                     NULL);

    // @COMPAT PCMK_XA_ORPHAN is deprecated since 3.0.2
    if (rsc == NULL) {
-        pcmk__xe_set_bool_attr(node, PCMK_XA_ORPHAN, true);
-        pcmk__xe_set_bool_attr(node, PCMK_XA_REMOVED, true);
+        pcmk__xe_set_bool(node, PCMK_XA_ORPHAN, true);
+        pcmk__xe_set_bool(node, PCMK_XA_REMOVED, true);
    } else if (all || failcount || last_failure > 0) {
        char *migration_s = pcmk__itoa(rsc->priv->ban_after_failures);

        pcmk__xe_set_props(node,
                           PCMK_XA_ORPHAN, PCMK_VALUE_FALSE,
                           PCMK_XA_REMOVED, PCMK_VALUE_FALSE,
                           PCMK_META_MIGRATION_THRESHOLD, migration_s,
                           NULL);
        free(migration_s);

        if (failcount > 0) {
            char *s = pcmk__itoa(failcount);

            pcmk__xe_set(node, PCMK_XA_FAIL_COUNT, s);
            free(s);
        }

        if (last_failure > 0) {
            char *s = pcmk__epoch2str(&last_failure, 0);

            pcmk__xe_set(node, PCMK_XA_LAST_FAILURE, s);
            free(s);
        }
    }

    if (!as_header) {
        pcmk__output_xml_pop_parent(out);
    }

    return pcmk_rc_ok;
}

static void
print_resource_header(pcmk__output_t *out, uint32_t show_opts)
{
    if (pcmk__is_set(show_opts, pcmk_show_rscs_by_node)) {
        /* Active resources have already been printed by node */
        out->begin_list(out, NULL, NULL, "Inactive Resources");

    } else if (pcmk__is_set(show_opts, pcmk_show_inactive_rscs)) {
        out->begin_list(out, NULL, NULL, "Full List of Resources");

    } else {
        out->begin_list(out, NULL, NULL, "Active Resources");
    }
}

PCMK__OUTPUT_ARGS("resource-list", "pcmk_scheduler_t *", "uint32_t", "bool",
                  "GList *", "GList *", "bool")
static int
resource_list(pcmk__output_t *out, va_list args)
{
    pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
    uint32_t show_opts = va_arg(args, uint32_t);
    bool print_summary = va_arg(args, int);
    GList *only_node = va_arg(args, GList *);
    GList *only_rsc = va_arg(args, GList *);
    bool print_spacer = va_arg(args, int);

    GList *rsc_iter;
    int rc = pcmk_rc_no_output;
    bool printed_header = false;

    /* If we already showed active resources by node, and
     * we're not showing inactive resources, we have nothing to do
     */
    if (pcmk__is_set(show_opts, pcmk_show_rscs_by_node)
        && !pcmk__is_set(show_opts, pcmk_show_inactive_rscs)) {
        return rc;
    }

    /* If we haven't already printed resources grouped by node,
     * and brief output was requested, print resource summary
     */
    if (pcmk__is_set(show_opts, pcmk_show_brief)
        && !pcmk__is_set(show_opts, pcmk_show_rscs_by_node)) {
        GList *rscs = pe__filter_rsc_list(scheduler->priv->resources, only_rsc);

        PCMK__OUTPUT_SPACER_IF(out, print_spacer);
        print_resource_header(out, show_opts);
        printed_header = true;

        rc = pe__rscs_brief_output(out, rscs, show_opts);
        g_list_free(rscs);
    }

    /* For each resource, display it if appropriate */
    for
(rsc_iter = scheduler->priv->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) rsc_iter->data; int x; /* Complex resources may have some sub-resources active and some inactive */ bool is_active = rsc->priv->fns->active(rsc, true); bool partially_active = rsc->priv->fns->active(rsc, false); // Skip inactive removed resources (deleted but still in CIB) if (pcmk__is_set(rsc->flags, pcmk__rsc_removed) && !is_active) { continue; } /* Skip active resources if we already displayed them by node */ if (pcmk__is_set(show_opts, pcmk_show_rscs_by_node)) { if (is_active) { continue; } /* Skip primitives already counted in a brief summary */ } else if (pcmk__is_set(show_opts, pcmk_show_brief) && pcmk__is_primitive(rsc)) { continue; /* Skip resources that aren't at least partially active, * unless we're displaying inactive resources */ } else if (!partially_active && !pcmk__is_set(show_opts, pcmk_show_inactive_rscs)) { continue; } else if (partially_active && !pe__rsc_running_on_any(rsc, only_node)) { continue; } if (!printed_header) { PCMK__OUTPUT_SPACER_IF(out, print_spacer); print_resource_header(out, show_opts); printed_header = true; } /* Print this resource */ x = out->message(out, (const char *) rsc->priv->xml->name, show_opts, rsc, only_node, only_rsc); if (x == pcmk_rc_ok) { rc = pcmk_rc_ok; } } if (print_summary && rc != pcmk_rc_ok) { if (!printed_header) { PCMK__OUTPUT_SPACER_IF(out, print_spacer); print_resource_header(out, show_opts); printed_header = true; } /* @FIXME It looks as if we can return pcmk_rc_no_output even after * writing output here. */ if (pcmk__is_set(show_opts, pcmk_show_rscs_by_node)) { out->list_item(out, NULL, "No inactive resources"); } else if (pcmk__is_set(show_opts, pcmk_show_inactive_rscs)) { out->list_item(out, NULL, "No resources"); } else { out->list_item(out, NULL, "No active resources"); } } if (printed_header) { out->end_list(out); } return rc; } PCMK__OUTPUT_ARGS("resource-operation-list", "pcmk_scheduler_t *", "pcmk_resource_t *", "pcmk_node_t *", "GList *", "uint32_t") static int resource_operation_list(pcmk__output_t *out, va_list args) { pcmk_scheduler_t *scheduler G_GNUC_UNUSED = va_arg(args, pcmk_scheduler_t *); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); GList *op_list = va_arg(args, GList *); uint32_t show_opts = va_arg(args, uint32_t); GList *gIter = NULL; int rc = pcmk_rc_no_output; /* Print each operation */ for (gIter = op_list; gIter != NULL; gIter = gIter->next) { xmlNode *xml_op = (xmlNode *) gIter->data; const char *task = pcmk__xe_get(xml_op, PCMK_XA_OPERATION); const char *interval_ms_s = pcmk__xe_get(xml_op, PCMK_META_INTERVAL); const char *op_rc = pcmk__xe_get(xml_op, PCMK__XA_RC_CODE); int op_rc_i; pcmk__scan_min_int(op_rc, &op_rc_i, 0); /* Display 0-interval monitors as "probe" */ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei) && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) { task = "probe"; } /* If this is the first printed operation, print heading for resource */ if (rc == pcmk_rc_no_output) { time_t last_failure = 0; int failcount = pe_get_failcount(node, rsc, &last_failure, pcmk__fc_default, NULL); out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true, failcount, last_failure, true); rc = pcmk_rc_ok; } /* Print the operation */ out->message(out, "op-history", xml_op, task, interval_ms_s, op_rc_i, show_opts); } /* Free the list we created (no need to free the 
individual items) */ g_list_free(op_list); PCMK__OUTPUT_LIST_FOOTER(out, rc); return rc; } PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static int resource_util(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *fn = va_arg(args, const char *); char *dump_text = pcmk__assert_asprintf("%s: %s utilization on %s:", fn, rsc->id, pcmk__node_name(node)); g_hash_table_foreach(rsc->priv->utilization, append_dump_text, &dump_text); out->list_item(out, NULL, "%s", dump_text); free(dump_text); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *", "const char *") static int resource_util_xml(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *uname = node->priv->name; const char *fn = va_arg(args, const char *); xmlNodePtr xml_node = NULL; xml_node = pcmk__output_create_xml_node(out, PCMK_XE_UTILIZATION, PCMK_XA_RESOURCE, rsc->id, PCMK_XA_NODE, uname, PCMK_XA_FUNCTION, fn, NULL); g_hash_table_foreach(rsc->priv->utilization, add_dump_node, xml_node); return pcmk_rc_ok; } static inline const char * ticket_status(pcmk__ticket_t *ticket) { if (pcmk__is_set(ticket->flags, pcmk__ticket_granted)) { return PCMK_VALUE_GRANTED; } return PCMK_VALUE_REVOKED; } static inline const char * ticket_standby_text(pcmk__ticket_t *ticket) { return pcmk__is_set(ticket->flags, pcmk__ticket_standby)? " [standby]" : ""; } PCMK__OUTPUT_ARGS("ticket", "pcmk__ticket_t *", "bool", "bool") static int ticket_default(pcmk__output_t *out, va_list args) { pcmk__ticket_t *ticket = va_arg(args, pcmk__ticket_t *); bool raw = va_arg(args, int); bool details = va_arg(args, int); GString *detail_str = NULL; if (raw) { out->list_item(out, ticket->id, "%s", ticket->id); return pcmk_rc_ok; } if (details && g_hash_table_size(ticket->state) > 0) { GHashTableIter iter; const char *name = NULL; const char *value = NULL; bool already_added = false; detail_str = g_string_sized_new(100); pcmk__g_strcat(detail_str, "\t(", NULL); g_hash_table_iter_init(&iter, ticket->state); while (g_hash_table_iter_next(&iter, (void **) &name, (void **) &value)) { if (already_added) { g_string_append_printf(detail_str, ", %s=", name); } else { g_string_append_printf(detail_str, "%s=", name); already_added = true; } if (pcmk__str_any_of(name, PCMK_XA_LAST_GRANTED, "expires", NULL)) { char *epoch_str = NULL; long long time_ll; (void) pcmk__scan_ll(value, &time_ll, 0); epoch_str = pcmk__epoch2str((const time_t *) &time_ll, 0); pcmk__g_strcat(detail_str, epoch_str, NULL); free(epoch_str); } else { pcmk__g_strcat(detail_str, value, NULL); } } pcmk__g_strcat(detail_str, ")", NULL); } if (ticket->last_granted > -1) { /* Prior to the introduction of the details & raw arguments to this * function, last-granted would always be added in this block. We need * to preserve that behavior. At the same time, we also need to preserve * the existing behavior from crm_ticket, which would include last-granted * as part of the (...) detail string. * * Luckily we can check detail_str - if it's NULL, either there were no * details, or we are preserving the previous behavior of this function. * If it's not NULL, we are either preserving the previous behavior of * crm_ticket or we were given details=true as an argument. 
*/ if (detail_str == NULL) { char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0); out->list_item(out, NULL, "%s\t%s%s last-granted=\"%s\"", ticket->id, ticket_status(ticket), ticket_standby_text(ticket), pcmk__s(epoch_str, "")); free(epoch_str); } else { out->list_item(out, NULL, "%s\t%s%s %s", ticket->id, ticket_status(ticket), ticket_standby_text(ticket), detail_str->str); } } else { out->list_item(out, NULL, "%s\t%s%s%s", ticket->id, ticket_status(ticket), ticket_standby_text(ticket), detail_str != NULL ? detail_str->str : ""); } if (detail_str != NULL) { g_string_free(detail_str, TRUE); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket", "pcmk__ticket_t *", "bool", "bool") static int ticket_xml(pcmk__output_t *out, va_list args) { pcmk__ticket_t *ticket = va_arg(args, pcmk__ticket_t *); bool raw G_GNUC_UNUSED = va_arg(args, int); bool details G_GNUC_UNUSED = va_arg(args, int); const char *standby = pcmk__flag_text(ticket->flags, pcmk__ticket_standby); xmlNodePtr node = NULL; GHashTableIter iter; const char *name = NULL; const char *value = NULL; node = pcmk__output_create_xml_node(out, PCMK_XE_TICKET, PCMK_XA_ID, ticket->id, PCMK_XA_STATUS, ticket_status(ticket), PCMK_XA_STANDBY, standby, NULL); if (ticket->last_granted > -1) { char *buf = pcmk__epoch2str(&ticket->last_granted, 0); pcmk__xe_set(node, PCMK_XA_LAST_GRANTED, buf); free(buf); } g_hash_table_iter_init(&iter, ticket->state); while (g_hash_table_iter_next(&iter, (void **) &name, (void **) &value)) { /* PCMK_XA_LAST_GRANTED and "expires" are already added by the check * for ticket->last_granted above. */ if (pcmk__str_any_of(name, PCMK_XA_LAST_GRANTED, PCMK_XA_EXPIRES, NULL)) { continue; } pcmk__xe_set(node, name, value); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("ticket-list", "GHashTable *", "bool", "bool", "bool") static int ticket_list(pcmk__output_t *out, va_list args) { GHashTable *tickets = va_arg(args, GHashTable *); bool print_spacer = va_arg(args, int); bool raw = va_arg(args, int); bool details = va_arg(args, int); GHashTableIter iter; gpointer value; if (g_hash_table_size(tickets) == 0) { return pcmk_rc_no_output; } PCMK__OUTPUT_SPACER_IF(out, print_spacer); /* Print section heading */ out->begin_list(out, NULL, NULL, "Tickets"); /* Print each ticket */ g_hash_table_iter_init(&iter, tickets); while (g_hash_table_iter_next(&iter, NULL, &value)) { pcmk__ticket_t *ticket = (pcmk__ticket_t *) value; out->message(out, "ticket", ticket, raw, details); } /* Close section */ out->end_list(out); return pcmk_rc_ok; } static pcmk__message_entry_t fmt_functions[] = { { "ban", "default", ban_text }, { "ban", "html", ban_html }, { "ban", "xml", ban_xml }, { "ban-list", "default", ban_list }, { "bundle", "default", pe__bundle_text }, { "bundle", "xml", pe__bundle_xml }, { "bundle", "html", pe__bundle_html }, { "clone", "default", pe__clone_default }, { "clone", "xml", pe__clone_xml }, { "cluster-counts", "default", cluster_counts_text }, { "cluster-counts", "html", cluster_counts_html }, { "cluster-counts", "xml", cluster_counts_xml }, { "cluster-dc", "default", cluster_dc_text }, { "cluster-dc", "html", cluster_dc_html }, { "cluster-dc", "xml", cluster_dc_xml }, { "cluster-options", "default", cluster_options_text }, { "cluster-options", "html", cluster_options_html }, { "cluster-options", "log", cluster_options_log }, { "cluster-options", "xml", cluster_options_xml }, { "cluster-summary", "default", cluster_summary }, { "cluster-summary", "html", cluster_summary_html }, { "cluster-stack", "default", cluster_stack_text 
},
    { "cluster-stack", "html", cluster_stack_html },
    { "cluster-stack", "xml", cluster_stack_xml },
    { "cluster-times", "default", cluster_times_text },
    { "cluster-times", "html", cluster_times_html },
    { "cluster-times", "xml", cluster_times_xml },
    { "failed-action", "default", failed_action_default },
    { "failed-action", "xml", failed_action_xml },
    { "failed-action-list", "default", failed_action_list },
    { "group", "default", pe__group_default},
    { "group", "xml", pe__group_xml },
    { "maint-mode", "text", cluster_maint_mode_text },
    { "node", "default", node_text },
    { "node", "html", node_html },
    { "node", "xml", node_xml },
    { "node-and-op", "default", node_and_op },
    { "node-and-op", "xml", node_and_op_xml },
    { "node-capacity", "default", node_capacity },
    { "node-capacity", "xml", node_capacity_xml },
    { "node-history-list", "default", node_history_list },
    { "node-list", "default", node_list_text },
    { "node-list", "html", node_list_html },
    { "node-list", "xml", node_list_xml },
    { "node-weight", "default", node_weight },
    { "node-weight", "xml", node_weight_xml },
    { "node-attribute", "default", node_attribute_text },
    { "node-attribute", "html", node_attribute_html },
    { "node-attribute", "xml", node_attribute_xml },
    { "node-attribute-list", "default", node_attribute_list },
    { "node-summary", "default", node_summary },
    { "op-history", "default", op_history_text },
    { "op-history", "xml", op_history_xml },
    { "primitive", "default", pe__resource_text },
    { "primitive", "xml", pe__resource_xml },
    { "primitive", "html", pe__resource_html },
    { "promotion-score", "default", promotion_score },
    { "promotion-score", "xml", promotion_score_xml },
    { "resource-config", "default", resource_config },
    { "resource-config", "text", resource_config_text },
    { "resource-history", "default", resource_history_text },
    { "resource-history", "xml", resource_history_xml },
    { "resource-list", "default", resource_list },
    { "resource-operation-list", "default", resource_operation_list },
    { "resource-util", "default", resource_util },
    { "resource-util", "xml", resource_util_xml },
    { "ticket", "default", ticket_default },
    { "ticket", "xml", ticket_xml },
    { "ticket-list", "default", ticket_list },
    { NULL, NULL, NULL }
};

void
pe__register_messages(pcmk__output_t *out)
{
    pcmk__register_messages(out, fmt_functions);
}

diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index aab305fa66..f5f7525021 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -1,888 +1,888 @@
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
*/ #include #include #include #include #include #include #define cons_string(x) x?x:"NA" static int print_constraint(xmlNode *xml_obj, void *userdata) { pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) userdata; pcmk__output_t *out = scheduler->priv->out; const char *id = pcmk__xe_get(xml_obj, PCMK_XA_ID); if (id == NULL) { return pcmk_rc_ok; } if (!pcmk__xe_is(xml_obj, PCMK_XE_RSC_COLOCATION)) { return pcmk_rc_ok; } out->info(out, "Constraint %s %s %s %s %s %s %s", xml_obj->name, cons_string(pcmk__xe_get(xml_obj, PCMK_XA_ID)), cons_string(pcmk__xe_get(xml_obj, PCMK_XA_RSC)), cons_string(pcmk__xe_get(xml_obj, PCMK_XA_WITH_RSC)), cons_string(pcmk__xe_get(xml_obj, PCMK_XA_SCORE)), cons_string(pcmk__xe_get(xml_obj, PCMK_XA_RSC_ROLE)), cons_string(pcmk__xe_get(xml_obj, PCMK_XA_WITH_RSC_ROLE))); return pcmk_rc_ok; } void cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler) { pcmk__xe_foreach_child(pcmk_find_cib_element(scheduler->input, PCMK_XE_CONSTRAINTS), NULL, print_constraint, scheduler); } void cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out) { const char *host = NULL; bool needs_quorum = TRUE; const char *rtype = pcmk__xe_get(rsc->priv->xml, PCMK_XA_TYPE); const char *rprov = pcmk__xe_get(rsc->priv->xml, PCMK_XA_PROVIDER); const char *rclass = pcmk__xe_get(rsc->priv->xml, PCMK_XA_CLASS); pcmk_node_t *node = pcmk__current_node(rsc); if (pcmk__is_set(rsc->flags, pcmk__rsc_fence_device)) { needs_quorum = FALSE; } else { // @TODO check requires in resource meta-data and rsc_defaults } if (node != NULL) { host = node->priv->name; } out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld %#.16llx", rsc->priv->xml->name, rsc->id, pcmk__s(rsc->priv->history_id, rsc->id), ((rsc->priv->parent == NULL)? "NA" : rsc->priv->parent->id), rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags, rsc->flags); g_list_foreach(rsc->priv->children, (GFunc) cli_resource_print_cts, out); } // \return Standard Pacemaker return code int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pcmk_scheduler_t *scheduler) { pcmk__output_t *out = scheduler->priv->out; int rc = pcmk_rc_no_output; GList *ops = find_operations(rsc_id, host_uname, active, scheduler); if (!ops) { return rc; } out->begin_list(out, NULL, NULL, "Resource Operations"); rc = pcmk_rc_ok; for (GList *lpc = ops; lpc != NULL; lpc = lpc->next) { xmlNode *xml_op = (xmlNode *) lpc->data; out->message(out, "node-and-op", scheduler, xml_op); } out->end_list(out); return rc; } // \return Standard Pacemaker return code int cli_resource_print(pcmk_resource_t *rsc, bool expanded) { pcmk_scheduler_t *scheduler = NULL; pcmk__output_t *out = NULL; GList *all = NULL; pcmk__assert(rsc != NULL); scheduler = rsc->priv->scheduler; out = scheduler->priv->out; all = g_list_prepend(all, (gpointer) "*"); out->begin_list(out, NULL, NULL, "Resource Config"); out->message(out, (const char *) rsc->priv->xml->name, pcmk_show_pending, rsc, all, all); out->message(out, "resource-config", rsc, !expanded); out->end_list(out); g_list_free(all); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed", "attr_update_data_t *") static int attribute_changed_default(pcmk__output_t *out, va_list args) { attr_update_data_t *ud = va_arg(args, attr_update_data_t *); out->info(out, "Set '%s' option: " PCMK_XA_ID "=%s%s%s%s%s value=%s", ud->given_rsc_id, ud->found_attr_id, ((ud->attr_set_id == NULL)? "" : " " PCMK__XA_SET "="), pcmk__s(ud->attr_set_id, ""), ((ud->attr_name == NULL)? 
"" : " " PCMK_XA_NAME "="), pcmk__s(ud->attr_name, ""), ud->attr_value); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed", "attr_update_data_t *") static int attribute_changed_xml(pcmk__output_t *out, va_list args) { attr_update_data_t *ud = va_arg(args, attr_update_data_t *); pcmk__output_xml_create_parent(out, (const char *) ud->rsc->priv->xml->name, PCMK_XA_ID, ud->rsc->id, NULL); pcmk__output_xml_create_parent(out, ud->attr_set_type, PCMK_XA_ID, ud->attr_set_id, NULL); pcmk__output_create_xml_node(out, PCMK_XE_NVPAIR, PCMK_XA_ID, ud->found_attr_id, PCMK_XA_VALUE, ud->attr_value, PCMK_XA_NAME, ud->attr_name, NULL); pcmk__output_xml_pop_parent(out); pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed-list", "GList *") static int attribute_changed_list_default(pcmk__output_t *out, va_list args) { GList *results = va_arg(args, GList *); if (results == NULL) { return pcmk_rc_no_output; } for (GList *iter = results; iter != NULL; iter = iter->next) { attr_update_data_t *ud = iter->data; out->message(out, "attribute-changed", ud); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-changed-list", "GList *") static int attribute_changed_list_xml(pcmk__output_t *out, va_list args) { GList *results = va_arg(args, GList *); if (results == NULL) { return pcmk_rc_no_output; } pcmk__output_xml_create_parent(out, PCMK__XE_RESOURCE_SETTINGS, NULL); for (GList *iter = results; iter != NULL; iter = iter->next) { attr_update_data_t *ud = iter->data; out->message(out, "attribute-changed", ud); } pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *", "const char *") static int attribute_list_default(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *attr = va_arg(args, char *); const char *value = va_arg(args, const char *); if (value != NULL) { out->begin_list(out, NULL, NULL, "Attributes"); out->list_item(out, attr, "%s", value); out->end_list(out); return pcmk_rc_ok; } else { out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "crm_exit_t", "const char *") static int agent_status_default(pcmk__output_t *out, va_list args) { int status = va_arg(args, int); const char *action = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *class = va_arg(args, const char *); const char *provider = va_arg(args, const char *); const char *type = va_arg(args, const char *); crm_exit_t rc = va_arg(args, crm_exit_t); const char *exit_reason = va_arg(args, const char *); if (status == PCMK_EXEC_DONE) { /* Operation [for ] ([:]:) * returned ([: ]) */ out->info(out, "Operation %s%s%s (%s%s%s:%s) returned %d (%s%s%s)", action, ((name == NULL)? "" : " for "), ((name == NULL)? "" : name), class, ((provider == NULL)? "" : ":"), ((provider == NULL)? "" : provider), type, (int) rc, crm_exit_str(rc), ((exit_reason == NULL)? "" : ": "), ((exit_reason == NULL)? "" : exit_reason)); } else { /* Operation [for ] ([:]:) * could not be executed ([: ]) */ out->err(out, "Operation %s%s%s (%s%s%s:%s) could not be executed (%s%s%s)", action, ((name == NULL)? "" : " for "), ((name == NULL)? "" : name), class, ((provider == NULL)? "" : ":"), ((provider == NULL)? "" : provider), type, pcmk_exec_status_str(status), ((exit_reason == NULL)? "" : ": "), ((exit_reason == NULL)? 
"" : exit_reason)); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("agent-status", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "crm_exit_t", "const char *") static int agent_status_xml(pcmk__output_t *out, va_list args) { int status = va_arg(args, int); const char *action G_GNUC_UNUSED = va_arg(args, const char *); const char *name G_GNUC_UNUSED = va_arg(args, const char *); const char *class G_GNUC_UNUSED = va_arg(args, const char *); const char *provider G_GNUC_UNUSED = va_arg(args, const char *); const char *type G_GNUC_UNUSED = va_arg(args, const char *); crm_exit_t rc = va_arg(args, crm_exit_t); const char *exit_reason = va_arg(args, const char *); char *exit_s = pcmk__itoa(rc); const char *message = crm_exit_str(rc); char *status_s = pcmk__itoa(status); const char *execution_message = pcmk_exec_status_str(status); pcmk__output_create_xml_node(out, PCMK_XE_AGENT_STATUS, PCMK_XA_CODE, exit_s, PCMK_XA_MESSAGE, message, PCMK_XA_EXECUTION_CODE, status_s, PCMK_XA_EXECUTION_MESSAGE, execution_message, PCMK_XA_REASON, exit_reason, NULL); free(exit_s); free(status_s); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *", "const char *") static int attribute_list_text(pcmk__output_t *out, va_list args) { pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); const char *attr = va_arg(args, char *); const char *value = va_arg(args, const char *); if (value != NULL) { pcmk__formatted_printf(out, "%s\n", value); return pcmk_rc_ok; } else { out->err(out, "Attribute '%s' not found for '%s'", attr, rsc->id); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *") static int override_default(pcmk__output_t *out, va_list args) { const char *rsc_name = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); if (rsc_name == NULL) { out->list_item(out, NULL, "Overriding the cluster configuration with '%s' = '%s'", name, value); } else { out->list_item(out, NULL, "Overriding the cluster configuration for '%s' with '%s' = '%s'", rsc_name, name, value); } return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("override", "const char *", "const char *", "const char *") static int override_xml(pcmk__output_t *out, va_list args) { const char *rsc_name = va_arg(args, const char *); const char *name = va_arg(args, const char *); const char *value = va_arg(args, const char *); xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_OVERRIDE, PCMK_XA_NAME, name, PCMK_XA_VALUE, value, NULL); if (rsc_name != NULL) { pcmk__xe_set(node, PCMK_XA_RSC, rsc_name); } return pcmk_rc_ok; } // Does not modify overrides or its contents PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "GHashTable *", "crm_exit_t", "int", "const char *", "const char *", "const char *") static int resource_agent_action_default(pcmk__output_t *out, va_list args) { int verbose = va_arg(args, int); const char *class = va_arg(args, const char *); const char *provider = va_arg(args, const char *); const char *type = va_arg(args, const char *); const char *rsc_name = va_arg(args, const char *); const char *action = va_arg(args, const char *); GHashTable *overrides = va_arg(args, GHashTable *); crm_exit_t rc = va_arg(args, crm_exit_t); int status = va_arg(args, int); const char *exit_reason = va_arg(args, const char *); const char *stdout_data = va_arg(args, const char *); const char *stderr_data = 
va_arg(args, const char *); if (overrides) { GHashTableIter iter; const char *name = NULL; const char *value = NULL; out->begin_list(out, NULL, NULL, PCMK_XE_OVERRIDES); g_hash_table_iter_init(&iter, overrides); while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) { out->message(out, "override", rsc_name, name, value); } out->end_list(out); } out->message(out, "agent-status", status, action, rsc_name, class, provider, type, rc, exit_reason); /* hide output for validate-all if not in verbose */ if ((verbose == 0) && pcmk__str_eq(action, PCMK_ACTION_VALIDATE_ALL, pcmk__str_casei)) { return pcmk_rc_ok; } if (stdout_data || stderr_data) { xmlNodePtr doc = NULL; if (stdout_data != NULL) { doc = pcmk__xml_parse(stdout_data); } if (doc != NULL) { out->output_xml(out, PCMK_XE_COMMAND, stdout_data); pcmk__xml_free(doc); } else { out->subprocess_output(out, rc, stdout_data, stderr_data); } } return pcmk_rc_ok; } // Does not modify overrides or its contents PCMK__OUTPUT_ARGS("resource-agent-action", "int", "const char *", "const char *", "const char *", "const char *", "const char *", "GHashTable *", "crm_exit_t", "int", "const char *", "const char *", "const char *") static int resource_agent_action_xml(pcmk__output_t *out, va_list args) { int verbose G_GNUC_UNUSED = va_arg(args, int); const char *class = va_arg(args, const char *); const char *provider = va_arg(args, const char *); const char *type = va_arg(args, const char *); const char *rsc_name = va_arg(args, const char *); const char *action = va_arg(args, const char *); GHashTable *overrides = va_arg(args, GHashTable *); crm_exit_t rc = va_arg(args, crm_exit_t); int status = va_arg(args, int); const char *exit_reason = va_arg(args, const char *); const char *stdout_data = va_arg(args, const char *); const char *stderr_data = va_arg(args, const char *); xmlNodePtr node = NULL; node = pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE_AGENT_ACTION, PCMK_XA_ACTION, action, PCMK_XA_CLASS, class, PCMK_XA_TYPE, type, NULL); if (rsc_name) { pcmk__xe_set(node, PCMK_XA_RSC, rsc_name); } pcmk__xe_set(node, PCMK_XA_PROVIDER, provider); if (overrides) { GHashTableIter iter; const char *name = NULL; const char *value = NULL; out->begin_list(out, NULL, NULL, PCMK_XE_OVERRIDES); g_hash_table_iter_init(&iter, overrides); while (g_hash_table_iter_next(&iter, (gpointer *) &name, (gpointer *) &value)) { out->message(out, "override", rsc_name, name, value); } out->end_list(out); } out->message(out, "agent-status", status, action, rsc_name, class, provider, type, rc, exit_reason); if (stdout_data || stderr_data) { xmlNodePtr doc = NULL; if (stdout_data != NULL) { doc = pcmk__xml_parse(stdout_data); } if (doc != NULL) { out->output_xml(out, PCMK_XE_COMMAND, stdout_data); pcmk__xml_free(doc); } else { out->subprocess_output(out, rc, stdout_data, stderr_data); } } pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *") static int resource_check_list_default(pcmk__output_t *out, va_list args) { resource_checks_t *checks = va_arg(args, resource_checks_t *); const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false); const pcmk_scheduler_t *scheduler = checks->rsc->priv->scheduler; if (checks->flags == 0) { return pcmk_rc_no_output; } out->begin_list(out, NULL, NULL, "Resource Checks"); if (pcmk__is_set(checks->flags, rsc_remain_stopped)) { out->list_item(out, "check", "Configuration specifies '%s' should remain stopped", parent->id); } if 
(pcmk__is_set(checks->flags, rsc_unpromotable)) {
        out->list_item(out, "check",
                       "Configuration specifies '%s' should not be promoted",
                       parent->id);
    }

    if (pcmk__is_set(checks->flags, rsc_unmanaged)) {
        out->list_item(out, "check",
                       "Configuration prevents cluster from stopping or starting unmanaged '%s'",
                       parent->id);
    }

    if (pcmk__is_set(checks->flags, rsc_locked)) {
        out->list_item(out, "check",
                       "'%s' is locked to node %s due to shutdown",
                       parent->id, checks->lock_node);
    }

    if (pcmk__is_set(checks->flags, rsc_node_health)) {
        out->list_item(out, "check",
                       "'%s' cannot run on unhealthy nodes due to "
                       PCMK_OPT_NODE_HEALTH_STRATEGY "='%s'",
                       parent->id,
                       pcmk__cluster_option(scheduler->priv->options,
                                            PCMK_OPT_NODE_HEALTH_STRATEGY));
    }

    out->end_list(out);
    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("resource-check-list", "resource_checks_t *")
static int
resource_check_list_xml(pcmk__output_t *out, va_list args)
{
    resource_checks_t *checks = va_arg(args, resource_checks_t *);

    const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false);
    xmlNodePtr node = pcmk__output_create_xml_node(out, PCMK_XE_CHECK,
                                                   PCMK_XA_ID, parent->id,
                                                   NULL);

    if (pcmk__is_set(checks->flags, rsc_remain_stopped)) {
-        pcmk__xe_set_bool_attr(node, PCMK_XA_REMAIN_STOPPED, true);
+        pcmk__xe_set_bool(node, PCMK_XA_REMAIN_STOPPED, true);
    }

    if (pcmk__is_set(checks->flags, rsc_unpromotable)) {
-        pcmk__xe_set_bool_attr(node, PCMK_XA_PROMOTABLE, false);
+        pcmk__xe_set_bool(node, PCMK_XA_PROMOTABLE, false);
    }

    if (pcmk__is_set(checks->flags, rsc_unmanaged)) {
-        pcmk__xe_set_bool_attr(node, PCMK_XA_UNMANAGED, true);
+        pcmk__xe_set_bool(node, PCMK_XA_UNMANAGED, true);
    }

    if (pcmk__is_set(checks->flags, rsc_locked)) {
        pcmk__xe_set(node, PCMK_XA_LOCKED_TO_HYPHEN, checks->lock_node);
    }

    if (pcmk__is_set(checks->flags, rsc_node_health)) {
-        pcmk__xe_set_bool_attr(node, PCMK_XA_UNHEALTHY, true);
+        pcmk__xe_set_bool(node, PCMK_XA_UNHEALTHY, true);
    }

    return pcmk_rc_ok;
}

PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "const gchar *")
static int
resource_search_list_default(pcmk__output_t *out, va_list args)
{
    GList *nodes = va_arg(args, GList *);
    const gchar *requested_name = va_arg(args, const gchar *);

    bool printed = false;
    int rc = pcmk_rc_no_output;

    if (!out->is_quiet(out) && nodes == NULL) {
        out->err(out, "resource %s is NOT running", requested_name);
        return rc;
    }

    for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
        node_info_t *ni = (node_info_t *) lpc->data;

        if (!printed) {
            out->begin_list(out, NULL, NULL, "Nodes");
            printed = true;
            rc = pcmk_rc_ok;
        }

        if (out->is_quiet(out)) {
            out->list_item(out, "node", "%s", ni->node_name);
        } else {
            const char *role_text = "";

            if (ni->promoted) {
                role_text = " " PCMK_ROLE_PROMOTED;
            }
            out->list_item(out, "node", "resource %s is running on: %s%s",
                           requested_name, ni->node_name, role_text);
        }
    }

    if (printed) {
        out->end_list(out);
    }
    return rc;
}

PCMK__OUTPUT_ARGS("resource-search-list", "GList *", "const gchar *")
static int
resource_search_list_xml(pcmk__output_t *out, va_list args)
{
    GList *nodes = va_arg(args, GList *);
    const gchar *requested_name = va_arg(args, const gchar *);

    pcmk__output_xml_create_parent(out, PCMK_XE_NODES,
                                   PCMK_XA_RESOURCE, requested_name,
                                   NULL);

    for (GList *lpc = nodes; lpc != NULL; lpc = lpc->next) {
        node_info_t *ni = (node_info_t *) lpc->data;
        xmlNodePtr sub_node = pcmk__output_create_xml_text_node(out,
                                                                PCMK_XE_NODE,
                                                                ni->node_name);

        if (ni->promoted) {
            pcmk__xe_set(sub_node, PCMK_XA_STATE, "promoted");
        }
    }

    pcmk__output_xml_pop_parent(out);
    return pcmk_rc_ok;
}
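
/* A minimal sketch (not taken from the patch itself) of the rename applied in
 * resource_check_list_xml() above, assuming pcmk__xe_set_bool() keeps the
 * (element, attribute name, boolean value) signature of the
 * pcmk__xe_set_bool_attr() call it replaces:
 *
 *     pcmk__xe_set_bool_attr(node, PCMK_XA_UNMANAGED, true);  // old helper
 *     pcmk__xe_set_bool(node, PCMK_XA_UNMANAGED, true);       // new helper
 *
 * The attribute constant and boolean value passed in are unchanged in every
 * hunk of this diff; only the helper's name differs.
 */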
PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *", "pcmk_node_t *") static int resource_reasons_list_default(pcmk__output_t *out, va_list args) { GList *resources = va_arg(args, GList *); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *host_uname = (node == NULL)? NULL : node->priv->name; out->begin_list(out, NULL, NULL, "Resource Reasons"); if ((rsc == NULL) && (host_uname == NULL)) { GList *lpc = NULL; GList *hosts = NULL; for (lpc = resources; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current); if (hosts == NULL) { out->list_item(out, "reason", "Resource %s is not running", rsc->id); } else { out->list_item(out, "reason", "Resource %s is running", rsc->id); } cli_resource_check(out, rsc, NULL); g_list_free(hosts); hosts = NULL; } } else if ((rsc != NULL) && (host_uname != NULL)) { if (resource_is_running_on(rsc, host_uname)) { out->list_item(out, "reason", "Resource %s is running on host %s", rsc->id, host_uname); } else { out->list_item(out, "reason", "Resource %s is not running on host %s", rsc->id, host_uname); } cli_resource_check(out, rsc, node); } else if ((rsc == NULL) && (host_uname != NULL)) { const char* host_uname = node->priv->name; GList *allResources = node->priv->assigned_resources; GList *activeResources = node->details->running_rsc; GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); GList *lpc = NULL; for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; out->list_item(out, "reason", "Resource %s is running on host %s", rsc->id, host_uname); cli_resource_check(out, rsc, node); } for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; out->list_item(out, "reason", "Resource %s is assigned to host %s but not running", rsc->id, host_uname); cli_resource_check(out, rsc, node); } g_list_free(allResources); g_list_free(activeResources); g_list_free(unactiveResources); } else if ((rsc != NULL) && (host_uname == NULL)) { GList *hosts = NULL; rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current); out->list_item(out, "reason", "Resource %s is %srunning", rsc->id, (hosts? "" : "not ")); cli_resource_check(out, rsc, NULL); g_list_free(hosts); } out->end_list(out); return pcmk_rc_ok; } PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *", "pcmk_node_t *") static int resource_reasons_list_xml(pcmk__output_t *out, va_list args) { GList *resources = va_arg(args, GList *); pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *); pcmk_node_t *node = va_arg(args, pcmk_node_t *); const char *host_uname = (node == NULL)? 
NULL : node->priv->name; xmlNodePtr xml_node = pcmk__output_xml_create_parent(out, PCMK_XE_REASON, NULL); if ((rsc == NULL) && (host_uname == NULL)) { GList *lpc = NULL; GList *hosts = NULL; pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL); for (lpc = resources; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; const char *running = NULL; rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current); running = pcmk__btoa(hosts != NULL); pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE, PCMK_XA_ID, rsc->id, PCMK_XA_RUNNING, running, NULL); cli_resource_check(out, rsc, NULL); pcmk__output_xml_pop_parent(out); g_list_free(hosts); hosts = NULL; } pcmk__output_xml_pop_parent(out); } else if ((rsc != NULL) && (host_uname != NULL)) { if (resource_is_running_on(rsc, host_uname)) { pcmk__xe_set(xml_node, PCMK_XA_RUNNING_ON, host_uname); } cli_resource_check(out, rsc, node); } else if ((rsc == NULL) && (host_uname != NULL)) { const char* host_uname = node->priv->name; GList *allResources = node->priv->assigned_resources; GList *activeResources = node->details->running_rsc; GList *unactiveResources = pcmk__subtract_lists(allResources, activeResources, (GCompareFunc) strcmp); GList *lpc = NULL; pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCES, NULL); for (lpc = activeResources; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE, PCMK_XA_ID, rsc->id, PCMK_XA_RUNNING, PCMK_VALUE_TRUE, PCMK_XA_HOST, host_uname, NULL); cli_resource_check(out, rsc, node); pcmk__output_xml_pop_parent(out); } for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) { pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data; pcmk__output_xml_create_parent(out, PCMK_XE_RESOURCE, PCMK_XA_ID, rsc->id, PCMK_XA_RUNNING, PCMK_VALUE_FALSE, PCMK_XA_HOST, host_uname, NULL); cli_resource_check(out, rsc, node); pcmk__output_xml_pop_parent(out); } pcmk__output_xml_pop_parent(out); g_list_free(allResources); g_list_free(activeResources); g_list_free(unactiveResources); } else if ((rsc != NULL) && (host_uname == NULL)) { GList *hosts = NULL; rsc->priv->fns->location(rsc, &hosts, pcmk__rsc_node_current); pcmk__xe_set(xml_node, PCMK_XA_RUNNING, pcmk__btoa(hosts != NULL)); cli_resource_check(out, rsc, NULL); g_list_free(hosts); } pcmk__output_xml_pop_parent(out); return pcmk_rc_ok; } static void add_resource_name(pcmk_resource_t *rsc, pcmk__output_t *out) { if (rsc->priv->children == NULL) { /* Sometimes PCMK_XE_RESOURCE might act as a PCMK_XA_NAME instead of an * XML element name, depending on whether pcmk__output_enable_list_element * was called. 
*/ out->list_item(out, PCMK_XE_RESOURCE, "%s", rsc->id); } else { g_list_foreach(rsc->priv->children, (GFunc) add_resource_name, out); } } PCMK__OUTPUT_ARGS("resource-names-list", "GList *") static int resource_names(pcmk__output_t *out, va_list args) { GList *resources = va_arg(args, GList *); if (resources == NULL) { out->err(out, "NO resources configured\n"); return pcmk_rc_no_output; } out->begin_list(out, NULL, NULL, "Resource Names"); g_list_foreach(resources, (GFunc) add_resource_name, out); out->end_list(out); return pcmk_rc_ok; } static pcmk__message_entry_t fmt_functions[] = { { "agent-status", "default", agent_status_default }, { "agent-status", "xml", agent_status_xml }, { "attribute-changed", "default", attribute_changed_default }, { "attribute-changed", "xml", attribute_changed_xml }, { "attribute-changed-list", "default", attribute_changed_list_default }, { "attribute-changed-list", "xml", attribute_changed_list_xml }, { "attribute-list", "default", attribute_list_default }, { "attribute-list", "text", attribute_list_text }, { "override", "default", override_default }, { "override", "xml", override_xml }, { "resource-agent-action", "default", resource_agent_action_default }, { "resource-agent-action", "xml", resource_agent_action_xml }, { "resource-check-list", "default", resource_check_list_default }, { "resource-check-list", "xml", resource_check_list_xml }, { "resource-search-list", "default", resource_search_list_default }, { "resource-search-list", "xml", resource_search_list_xml }, { "resource-reasons-list", "default", resource_reasons_list_default }, { "resource-reasons-list", "xml", resource_reasons_list_xml }, { "resource-names-list", "default", resource_names }, { NULL, NULL, NULL } }; void crm_resource_register_messages(pcmk__output_t *out) { pcmk__register_messages(out, fmt_functions); }
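
For context, a minimal usage sketch (not part of the patch): the formatter tables in both files are hooked up through their register functions, after which callers dispatch by message name with the argument list declared in the matching PCMK__OUTPUT_ARGS() entry. The output object and resource list below are assumed to already exist.

/* Hypothetical caller: "out" is assumed to have been created elsewhere via
 * the pcmk__output API, and "resources" is assumed to be a GList of
 * pcmk_resource_t pointers.
 */
static void
print_resource_names(pcmk__output_t *out, GList *resources)
{
    /* Register the crm_resource formatters defined above */
    crm_resource_register_messages(out);

    /* Dispatch by message name; the trailing arguments must match
     * PCMK__OUTPUT_ARGS("resource-names-list", "GList *")
     */
    out->message(out, "resource-names-list", resources);
}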